crash-7.1.4/0000755000000000000000000000000012634305150011320 5ustar rootrootcrash-7.1.4/filesys.c0000775000000000000000000033715012634305150013160 0ustar rootroot/* filesys.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include static void show_mounts(ulong, int, struct task_context *); static int find_booted_kernel(void); static int find_booted_system_map(void); static int verify_utsname(char *); static char **build_searchdirs(int, int *); static int build_kernel_directory(char *); static int redhat_kernel_directory_v1(char *); static int redhat_kernel_directory_v2(char *); static int redhat_debug_directory(char *); static ulong *create_dentry_array(ulong, int *); static ulong *create_dentry_array_percpu(ulong, int *); static void show_fuser(char *, char *); static int mount_point(char *); static int open_file_reference(struct reference *); static void memory_source_init(void); static int get_pathname_component(ulong, ulong, int, char *, char *); static ulong *get_mount_list(int *, struct task_context *); char *inode_type(char *, char *); static void match_proc_version(void); static void get_live_memory_source(void); static int memory_driver_module_loaded(int *); static int insmod_memory_driver_module(void); static int get_memory_driver_dev(dev_t *); static int memory_driver_init(void); static 
int create_memory_device(dev_t); static void *radix_tree_lookup(ulong, ulong, int); static int match_file_string(char *, char *, char *); static ulong get_root_vfsmount(char *); static void check_live_arch_mismatch(void); static long get_inode_nrpages(ulong); static void dump_inode_page_cache_info(ulong); #define DENTRY_CACHE (20) #define INODE_CACHE (20) #define FILE_CACHE (20) static struct filesys_table { char *dentry_cache; ulong cached_dentry[DENTRY_CACHE]; ulong cached_dentry_hits[DENTRY_CACHE]; int dentry_cache_index; ulong dentry_cache_fills; char *inode_cache; ulong cached_inode[INODE_CACHE]; ulong cached_inode_hits[INODE_CACHE]; int inode_cache_index; ulong inode_cache_fills; char *file_cache; ulong cached_file[FILE_CACHE]; ulong cached_file_hits[FILE_CACHE]; int file_cache_index; ulong file_cache_fills; } filesys_table = { 0 }; static struct filesys_table *ft = &filesys_table; /* * Open the namelist, dumpfile and output devices. */ void fd_init(void) { pc->nfd = pc->kfd = pc->mfd = pc->dfd = -1; if ((pc->nullfp = fopen("/dev/null", "w+")) == NULL) error(INFO, "cannot open /dev/null (for extraneous output)"); if (REMOTE()) remote_fd_init(); else { if (pc->namelist && pc->namelist_debug && pc->system_map) { error(INFO, "too many namelist options:\n %s\n %s\n %s\n", pc->namelist, pc->namelist_debug, pc->system_map); program_usage(SHORT_FORM); } if (pc->namelist) { if (XEN_HYPER_MODE() && !pc->dumpfile) error(FATAL, "Xen hypervisor mode requires a dumpfile\n"); if (!pc->dumpfile && !get_proc_version()) error(INFO, "/proc/version: %s\n", strerror(errno)); } else { if (pc->dumpfile) { error(INFO, "namelist argument required\n"); program_usage(SHORT_FORM); } if (!pc->dumpfile) check_live_arch_mismatch(); if (!find_booted_kernel()) program_usage(SHORT_FORM); } if (!pc->dumpfile) { pc->flags |= LIVE_SYSTEM; get_live_memory_source(); } if ((pc->nfd = open(pc->namelist, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->namelist, strerror(errno)); else { close(pc->nfd); 
pc->nfd = -1; } if (ACTIVE() && !(pc->namelist_debug || pc->system_map)) { memory_source_init(); match_proc_version(); } } memory_source_init(); if (CRASHDEBUG(1)) { fprintf(fp, "readmem: %s() ", readmem_function_name()); if (ACTIVE()) { fprintf(fp, "-> %s ", pc->live_memsrc); if (pc->flags & MEMMOD) fprintf(fp, "(module)"); else if (pc->flags & CRASHBUILTIN) fprintf(fp, "(built-in)"); } fprintf(fp, "\n"); } } /* * Do whatever's necessary to handle the memory source. */ static void memory_source_init(void) { if (REMOTE() && !(pc->flags & MEMSRC_LOCAL)) return; if (pc->flags & KERNEL_DEBUG_QUERY) return; if (ACTIVE()) { if (pc->mfd != -1) /* already been here */ return; if (!STREQ(pc->live_memsrc, "/dev/mem") && STREQ(pc->live_memsrc, pc->memory_device)) { if (memory_driver_init()) return; error(INFO, "cannot initialize crash memory driver\n"); error(INFO, "using /dev/mem\n\n"); pc->flags &= ~MEMMOD; pc->flags |= DEVMEM; pc->readmem = read_dev_mem; pc->writemem = write_dev_mem; pc->live_memsrc = "/dev/mem"; } if (STREQ(pc->live_memsrc, "/dev/mem")) { if ((pc->mfd = open("/dev/mem", O_RDWR)) < 0) { if ((pc->mfd = open("/dev/mem", O_RDONLY)) < 0) error(FATAL, "/dev/mem: %s\n", strerror(errno)); } else pc->flags |= MFD_RDWR; } else if (STREQ(pc->live_memsrc, "/proc/kcore")) { if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0) error(FATAL, "/proc/kcore: %s\n", strerror(errno)); if (!proc_kcore_init(fp)) error(FATAL, "/proc/kcore: initialization failed\n"); } else error(FATAL, "unknown memory device: %s\n", pc->live_memsrc); return; } if (pc->dumpfile) { if (!file_exists(pc->dumpfile, NULL)) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(ENOENT)); if (!(pc->flags & DUMPFILE_TYPES)) error(FATAL, "%s: dump format not supported!\n", pc->dumpfile); if (pc->flags & NETDUMP) { if (!netdump_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & KDUMP) { if (!kdump_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization 
failed\n", pc->dumpfile); } else if (pc->flags & XENDUMP) { if (!xendump_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & KVMDUMP) { if (!kvmdump_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & DISKDUMP) { if (!diskdump_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & LKCD) { if ((pc->dfd = open(pc->dumpfile, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno)); if (!lkcd_dump_init(fp, pc->dfd, pc->dumpfile)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & S390D) { if (!s390_dump_init(pc->dumpfile)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } else if (pc->flags & VMWARE_VMSS) { if (!vmware_vmss_init(pc->dumpfile, fp)) error(FATAL, "%s: initialization failed\n", pc->dumpfile); } } } /* * If only a namelist argument is entered for a live system, and the * version string doesn't match /proc/version, try to avert a failure * by assigning it to a matching System.map. */ static void match_proc_version(void) { char buffer[BUFSIZE], *p1, *p2; if (pc->flags & KERNEL_DEBUG_QUERY) return; if (!strlen(kt->proc_version)) return; if (match_file_string(pc->namelist, kt->proc_version, buffer)) { if (CRASHDEBUG(1)) { fprintf(fp, "/proc/version:\n%s\n", kt->proc_version); fprintf(fp, "%s:\n%s", pc->namelist, buffer); } return; } error(WARNING, "%s%sand /proc/version do not match!\n\n", pc->namelist, strlen(pc->namelist) > 39 ? "\n " : " "); /* * find_booted_system_map() requires VTOP(), which used to be a * hardwired masking of the kernel address. But some architectures * may not know what their physical base address is at this point, * and others may have different machdep->kvbase values, so for all * but the 0-based kernel virtual address architectures, bail out * here with a relevant error message. 
*/ if (!machine_type("S390") && !machine_type("S390X")) { p1 = &kt->proc_version[strlen("Linux version ")]; p2 = strstr(p1, " "); *p2 = NULLCHAR; error(WARNING, "/proc/version indicates kernel version: %s\n", p1); error(FATAL, "please use the vmlinux file for that kernel version, or try using\n" " the System.map for that kernel version as an additional argument.\n", p1); clean_exit(1); } if (find_booted_system_map()) pc->flags |= SYSMAP; } #define CREATE 1 #define DESTROY 0 #define DEFAULT_SEARCHDIRS 5 static char ** build_searchdirs(int create, int *preferred) { int i; int cnt, start; DIR *dirp; struct dirent *dp; char dirbuf[BUFSIZE]; static char **searchdirs = { 0 }; static char *default_searchdirs[DEFAULT_SEARCHDIRS+1] = { "/usr/src/linux/", "/boot/", "/boot/efi/redhat", "/boot/efi/EFI/redhat", "/", NULL }; if (!create) { if (searchdirs) { for (i = DEFAULT_SEARCHDIRS; searchdirs[i]; i++) free(searchdirs[i]); free(searchdirs); } return NULL; } if (preferred) *preferred = 0; /* * Allow, at a minimum, the defaults plus an extra three directories * for the two possible /usr/src/redhat/BUILD/kernel-xxx locations * plus the Red Hat debug directory. 
*/ cnt = DEFAULT_SEARCHDIRS + 3; if ((dirp = opendir("/usr/src"))) { for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) cnt++; if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { error(INFO, "/usr/src/ directory list malloc: %s\n", strerror(errno)); closedir(dirp); return default_searchdirs; } for (i = 0; i < DEFAULT_SEARCHDIRS; i++) searchdirs[i] = default_searchdirs[i]; cnt = DEFAULT_SEARCHDIRS; rewinddir(dirp); for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (STREQ(dp->d_name, "linux") || STREQ(dp->d_name, "redhat") || STREQ(dp->d_name, ".") || STREQ(dp->d_name, "..")) continue; sprintf(dirbuf, "/usr/src/%s", dp->d_name); if (mount_point(dirbuf)) continue; if (!is_directory(dirbuf)) continue; if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { error(INFO, "/usr/src/ directory entry malloc: %s\n", strerror(errno)); break; } sprintf(searchdirs[cnt], "%s/", dirbuf); cnt++; } closedir(dirp); searchdirs[cnt] = NULL; } else { if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) { error(INFO, "search directory list malloc: %s\n", strerror(errno)); closedir(dirp); return default_searchdirs; } for (i = 0; i < DEFAULT_SEARCHDIRS; i++) searchdirs[i] = default_searchdirs[i]; cnt = DEFAULT_SEARCHDIRS; } if (build_kernel_directory(dirbuf)) { if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { error(INFO, "/lib/modules/ directory entry malloc: %s\n", strerror(errno)); } else { sprintf(searchdirs[cnt], "%s/", dirbuf); cnt++; } } if (redhat_kernel_directory_v1(dirbuf)) { if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { error(INFO, "/usr/src/redhat directory entry malloc: %s\n", strerror(errno)); } else { sprintf(searchdirs[cnt], "%s/", dirbuf); cnt++; } } if (redhat_kernel_directory_v2(dirbuf)) { if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { error(INFO, "/usr/src/redhat directory entry malloc: %s\n", strerror(errno)); } else { sprintf(searchdirs[cnt], "%s/", dirbuf); cnt++; 
} } if (redhat_debug_directory(dirbuf)) { if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { error(INFO, "%s directory entry malloc: %s\n", dirbuf, strerror(errno)); } else { sprintf(searchdirs[cnt], "%s/", dirbuf); if (preferred) *preferred = cnt; cnt++; } } searchdirs[cnt] = NULL; if (CRASHDEBUG(1)) { i = start = preferred ? *preferred : 0; do { fprintf(fp, "searchdirs[%d]: %s\n", i, searchdirs[i]); if (++i == cnt) { if (start != 0) i = 0; else break; } } while (i != start); } return searchdirs; } static int build_kernel_directory(char *buf) { char *p1, *p2; if (!strstr(kt->proc_version, "Linux version ")) return FALSE; BZERO(buf, BUFSIZE); sprintf(buf, "/lib/modules/"); p1 = &kt->proc_version[strlen("Linux version ")]; p2 = &buf[strlen(buf)]; while (*p1 != ' ') *p2++ = *p1++; strcat(buf, "/build"); return TRUE; } static int redhat_kernel_directory_v1(char *buf) { char *p1, *p2; if (!strstr(kt->proc_version, "Linux version ")) return FALSE; BZERO(buf, BUFSIZE); sprintf(buf, "/usr/src/redhat/BUILD/kernel-"); p1 = &kt->proc_version[strlen("Linux version ")]; p2 = &buf[strlen(buf)]; while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.')) *p2++ = *p1++; strcat(buf, "/linux"); return TRUE; } static int redhat_kernel_directory_v2(char *buf) { char *p1, *p2; if (!strstr(kt->proc_version, "Linux version ")) return FALSE; BZERO(buf, BUFSIZE); sprintf(buf, "/usr/src/redhat/BUILD/kernel-"); p1 = &kt->proc_version[strlen("Linux version ")]; p2 = &buf[strlen(buf)]; while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.')) *p2++ = *p1++; strcat(buf, "/linux-"); p1 = &kt->proc_version[strlen("Linux version ")]; p2 = &buf[strlen(buf)]; while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.')) *p2++ = *p1++; return TRUE; } static int redhat_debug_directory(char *buf) { char *p1, *p2; if (!strstr(kt->proc_version, "Linux version ")) return FALSE; BZERO(buf, BUFSIZE); sprintf(buf, "%s/", pc->redhat_debug_loc); p1 = &kt->proc_version[strlen("Linux version ")]; p2 = 
&buf[strlen(buf)]; while (*p1 != ' ') *p2++ = *p1++; return TRUE; } /* * If a namelist was not entered, presume we're using the currently-running * kernel. Read its version string from /proc/version, and then look in * the search directories for a kernel with the same version string embedded * in it. */ static int find_booted_kernel(void) { char kernel[BUFSIZE]; char buffer[BUFSIZE]; char **searchdirs; int i, preferred, wrapped; DIR *dirp; struct dirent *dp; int found; pc->flags |= FINDKERNEL; fflush(fp); if (!file_exists("/proc/version", NULL)) { error(INFO, "/proc/version: %s: cannot determine booted kernel\n", strerror(ENOENT)); return FALSE; } if (!get_proc_version()) { error(INFO, "/proc/version: %s\n", strerror(errno)); return FALSE; } if (CRASHDEBUG(1)) fprintf(fp, "\nfind_booted_kernel: search for [%s]\n", kt->proc_version); searchdirs = build_searchdirs(CREATE, &preferred); for (i = preferred, wrapped = found = FALSE; !found; i++) { if (!searchdirs[i]) { if (preferred && !wrapped) { wrapped = TRUE; i = 0; } else break; } else if (wrapped && (preferred == i)) break; dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (dp->d_name[0] == '.') continue; sprintf(kernel, "%s%s", searchdirs[i], dp->d_name); if (mount_point(kernel) || !file_readable(kernel) || !is_elf_file(kernel)) continue; if (CRASHDEBUG(1)) fprintf(fp, "find_booted_kernel: check: %s\n", kernel); found = match_file_string(kernel, kt->proc_version, buffer); if (found) break; } closedir(dirp); } mount_point(DESTROY); build_searchdirs(DESTROY, NULL); if (found) { if ((pc->namelist = (char *)malloc (strlen(kernel)+1)) == NULL) error(FATAL, "booted kernel name malloc: %s\n", strerror(errno)); else { strcpy(pc->namelist, kernel); if (CRASHDEBUG(1)) fprintf(fp, "find_booted_kernel: found: %s\n", pc->namelist); return TRUE; } } error(INFO, "cannot find booted kernel -- please enter namelist argument\n\n"); return FALSE; } /* * Determine 
whether a file is a mount point, without the benefit of stat(). * This horrendous kludge is necessary to avoid uninterruptible stat() or * fstat() calls on nfs mount-points where the remote directory is no longer * available. */ static int mount_point(char *name) { int i; static int mount_points_gathered = -1; static char **mount_points; char *arglist[MAXARGS]; char buf[BUFSIZE]; char mntfile[BUFSIZE]; int argc, found; FILE *mp; /* * The first time through, stash a list of mount points. */ if (mount_points_gathered < 0) { found = mount_points_gathered = 0; if (file_exists("/proc/mounts", NULL)) sprintf(mntfile, "/proc/mounts"); else if (file_exists("/etc/mtab", NULL)) sprintf(mntfile, "/etc/mtab"); else return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; while (fgets(buf, BUFSIZE, mp)) { argc = parse_line(buf, arglist); if (argc < 2) continue; found++; } pclose(mp); if (!(mount_points = (char **)malloc(sizeof(char *) * found))) return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; i = 0; while (fgets(buf, BUFSIZE, mp) && (mount_points_gathered < found)) { argc = parse_line(buf, arglist); if (argc < 2) continue; if ((mount_points[i] = (char *) malloc(strlen(arglist[1])*2))) { strcpy(mount_points[i], arglist[1]); mount_points_gathered++, i++; } } pclose(mp); if (CRASHDEBUG(2)) for (i = 0; i < mount_points_gathered; i++) fprintf(fp, "mount_points[%d]: %s (%lx)\n", i, mount_points[i], (ulong)mount_points[i]); } /* * A null name string means we're done with this routine forever, * so the malloc'd memory can be freed. */ if (!name) { for (i = 0; i < mount_points_gathered; i++) free(mount_points[i]); free(mount_points); return FALSE; } for (i = 0; i < mount_points_gathered; i++) { if (STREQ(name, mount_points[i])) return TRUE; } return FALSE; } /* * If /proc/version exists, get it for verification purposes later. */ int get_proc_version(void) { FILE *version; if (strlen(kt->proc_version)) /* been here, done that... 
*/ return TRUE; if (!file_exists("/proc/version", NULL)) return FALSE; if ((version = fopen("/proc/version", "r")) == NULL) return FALSE; if (fread(&kt->proc_version, sizeof(char), BUFSIZE-1, version) <= 0) { fclose(version); return FALSE; } fclose(version); strip_linefeeds(kt->proc_version); return TRUE; } /* * Given a non-matching kernel namelist, try to find a System.map file * that has a system_utsname whose contents match /proc/version. */ static int find_booted_system_map(void) { char system_map[BUFSIZE]; char **searchdirs; int i; DIR *dirp; struct dirent *dp; int found; fflush(fp); if (!file_exists("/proc/version", NULL)) { error(INFO, "/proc/version: %s: cannot determine booted System.map\n", strerror(ENOENT)); return FALSE; } if (!get_proc_version()) { error(INFO, "/proc/version: %s\n", strerror(errno)); return FALSE; } found = FALSE; /* * To avoid a search, try the obvious first. */ sprintf(system_map, "/boot/System.map"); if (file_readable(system_map) && verify_utsname(system_map)) { found = TRUE; } else { searchdirs = build_searchdirs(CREATE, NULL); for (i = 0; !found && searchdirs[i]; i++) { dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (!strstr(dp->d_name, "System.map")) continue; sprintf(system_map, "%s%s", searchdirs[i], dp->d_name); if (mount_point(system_map) || !file_readable(system_map) || !is_system_map(system_map)) continue; if (verify_utsname(system_map)) { found = TRUE; break; } } closedir(dirp); } mount_point(DESTROY); build_searchdirs(DESTROY, NULL); } if (found) { if ((pc->system_map = (char *)malloc (strlen(system_map)+1)) == NULL) error(FATAL, "booted system map name malloc: %s\n", strerror(errno)); strcpy(pc->system_map, system_map); if (CRASHDEBUG(1)) fprintf(fp, "find_booted_system_map: found: %s\n", pc->system_map); return TRUE; } error(INFO, "cannot find booted system map -- please enter namelist or system map\n\n"); return FALSE; } /* * Read the system_utsname 
from /dev/mem, based upon the address found
 * in the passed-in System.map file, and compare it to /proc/version.
 * Returns TRUE only if both the release and version strings read from
 * live memory appear verbatim in /proc/version.
 */
static int
verify_utsname(char *system_map)
{
	char buffer[BUFSIZE];
	ulong value;
	struct new_utsname new_utsname;

	if (CRASHDEBUG(1))
		fprintf(fp, "verify_utsname: check: %s\n", system_map);

	/* Locate the "D system_utsname" symbol line in the map file. */
	if (!match_file_string(system_map, "D system_utsname", buffer))
		return FALSE;

	/*
	 * Extract the symbol's hex address, read the utsname structure from
	 * the live memory source, and sanity-check it: both strings must be
	 * printable ASCII, the release must be of plausible length, and the
	 * version banner must be long enough to be a real build string.
	 * NOTE(review): the STRNEQ(release, "2.") test presumably predates
	 * 3.x-and-later kernels -- confirm before relying on this path.
	 */
	if (extract_hex(buffer, &value, NULLCHAR, TRUE) &&
	    (READMEM(pc->mfd, &new_utsname, sizeof(struct new_utsname),
	    value, VTOP(value)) > 0) &&
	    ascii_string(new_utsname.release) &&
	    ascii_string(new_utsname.version) &&
	    STRNEQ(new_utsname.release, "2.") &&
	    (strlen(new_utsname.release) > 4) &&
	    (strlen(new_utsname.version) > 27)) {

		if (CRASHDEBUG(1)) {
			fprintf(fp, "release: [%s]\n", new_utsname.release);
			fprintf(fp, "version: [%s]\n", new_utsname.version);
		}

		/* Both strings must appear in /proc/version to match. */
		if (strstr(kt->proc_version, new_utsname.release) &&
		    strstr(kt->proc_version, new_utsname.version)) {
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Determine whether a file exists, using the caller's stat structure if
 * one was passed in; otherwise a throwaway local stat buffer is used.
 */
int
file_exists(char *file, struct stat *sp)
{
	struct stat sbuf;

	if (stat(file, sp ? sp : &sbuf) == 0)
		return TRUE;

	return FALSE;
}

/*
 * Determine whether a file exists, and if so, if it's readable.
 * Readability is proven by actually reading one byte from the file,
 * not merely by inspecting permission bits.
 */
int
file_readable(char *file)
{
	char tmp;
	int fd;

	if (!file_exists(file, NULL))
		return FALSE;

	if ((fd = open(file, O_RDONLY)) < 0)
		return FALSE;
	if (read(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
		close(fd);
		return FALSE;
	}
	close(fd);

	return TRUE;
}

/*
 * Quick file checksummer.
*/ int file_checksum(char *file, long *retsum) { int i; int fd; ssize_t cnt; char buf[MIN_PAGE_SIZE]; long csum; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; csum = 0; BZERO(buf, MIN_PAGE_SIZE); while ((cnt = read(fd, buf, MIN_PAGE_SIZE)) > 0) { for (i = 0; i < cnt; i++) csum += buf[i]; BZERO(buf, MIN_PAGE_SIZE); } close(fd); *retsum = csum; return TRUE; } int is_directory(char *file) { struct stat sbuf; if (!file || !strlen(file)) return(FALSE); if (stat(file, &sbuf) == -1) return(FALSE); /* This file doesn't exist. */ return((sbuf.st_mode & S_IFMT) == S_IFDIR ? TRUE : FALSE); } /* * Search a directory tree for filename, and if found, return a temporarily * allocated buffer containing the full pathname. The "done" business is * protection against fgets() prematurely returning NULL before the find * command completes. (I thought this was impossible until I saw it happen...) * When time permits, rewrite this doing the search by hand. */ char * search_directory_tree(char *directory, char *file, int follow_links) { char command[BUFSIZE]; char buf[BUFSIZE]; char *retbuf, *start, *end, *module; FILE *pipe; regex_t regex; int regex_used, done; if (!file_exists("/usr/bin/find", NULL) || !file_exists("/bin/echo", NULL) || !is_directory(directory) || (*file == '(')) return NULL; sprintf(command, "/usr/bin/find %s %s -name %s -print; /bin/echo search done", follow_links ? 
"-L" : "", directory, file); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s\n", command, strerror(errno)); return NULL; } done = FALSE; retbuf = NULL; regex_used = ((start = strstr(file, "[")) && (end = strstr(file, "]")) && (start < end) && (regcomp(®ex, file, 0) == 0)); while (fgets(buf, BUFSIZE-1, pipe) || !done) { if (STREQ(buf, "search done\n")) { done = TRUE; break; } if (!retbuf && !regex_used && STREQ((char *)basename(strip_linefeeds(buf)), file)) { retbuf = GETBUF(strlen(buf)+1); strcpy(retbuf, buf); } if (!retbuf && regex_used) { module = basename(strip_linefeeds(buf)); if (regexec(®ex, module, 0, NULL, 0) == 0) { retbuf = GETBUF(strlen(buf)+1); strcpy(retbuf, buf); } } } if (regex_used) regfree(®ex); pclose(pipe); return retbuf; } /* * Determine whether a file exists, and if so, if it's a tty. */ int is_a_tty(char *filename) { int fd; if ((fd = open(filename, O_RDONLY)) < 0) return FALSE; if (isatty(fd)) { close(fd); return TRUE; } close(fd); return FALSE; } /* * Open a tmpfile for command output. fp is stashed in pc->saved_fp, and * temporarily set to the new FILE pointer. This allows a command to still * print to the original output while the tmpfile is still open. */ #define OPEN_ONLY_ONCE #ifdef OPEN_ONLY_ONCE void open_tmpfile(void) { int ret ATTRIBUTE_UNUSED; if (pc->tmpfile) error(FATAL, "recursive temporary file usage\n"); if (!pc->tmp_fp) { if ((pc->tmp_fp = tmpfile()) == NULL) error(FATAL, "cannot open temporary file\n"); } fflush(pc->tmpfile); ret = ftruncate(fileno(pc->tmp_fp), 0); rewind(pc->tmp_fp); pc->tmpfile = pc->tmp_fp; pc->saved_fp = fp; fp = pc->tmpfile; } #else void open_tmpfile(void) { if (pc->tmpfile) error(FATAL, "recursive temporary file usage\n"); if ((pc->tmpfile = tmpfile()) == NULL) { error(FATAL, "cannot open temporary file\n"); } else { pc->saved_fp = fp; fp = pc->tmpfile; } } #endif /* * Destroy the reference to the tmpfile, and restore fp to the state * it had when open_tmpfile() was called. 
 */
#ifdef OPEN_ONLY_ONCE
void
close_tmpfile(void)
{
	int ret ATTRIBUTE_UNUSED;

	if (pc->tmpfile) {
		fflush(pc->tmpfile);
		/*
		 * Truncate and rewind rather than fclose(): the single
		 * permanent tmpfile (pc->tmp_fp) is reused by the next
		 * open_tmpfile() call.
		 */
		ret = ftruncate(fileno(pc->tmpfile), 0);
		rewind(pc->tmpfile);
		pc->tmpfile = NULL;
		fp = pc->saved_fp;
	} else
		error(FATAL, "trying to close an unopened temporary file\n");
}
#else
void
close_tmpfile(void)
{
	if (pc->tmpfile) {
		fp = pc->saved_fp;
		fclose(pc->tmpfile);
		pc->tmpfile = NULL;
	} else
		error(FATAL, "trying to close an unopened temporary file\n");
}
#endif

/*
 * open_tmpfile2(), set_tmpfile2() and close_tmpfile2() do not use a
 * permanent tmpfile, and do NOT modify the global fp pointer or pc->saved_fp.
 * That being the case, all wrapped functions must be aware of it, or the
 * global fp pointer has to explicitly manipulated by the calling function.
 * The secondary tmpfile should only be used by common functions that might
 * be called by a higher-level function using the primary permanent tmpfile,
 * or alternatively a caller may pass in a FILE pointer to set_tmpfile2().
 */
void
open_tmpfile2(void)
{
	if (pc->tmpfile2)
		error(FATAL, "recursive secondary temporary file usage\n");
	if ((pc->tmpfile2 = tmpfile()) == NULL)
		error(FATAL, "cannot open secondary temporary file\n");

	rewind(pc->tmpfile2);
}

/*
 * Close and discard the secondary tmpfile.  Unlike close_tmpfile(),
 * silently does nothing if no secondary tmpfile is open.
 */
void
close_tmpfile2(void)
{
	if (pc->tmpfile2) {
		fflush(pc->tmpfile2);
		fclose(pc->tmpfile2);
		pc->tmpfile2 = NULL;
	}
}

/*
 * Adopt a caller-supplied FILE pointer as the secondary tmpfile.
 * Ownership note: the adopted stream will be fclose()'d by the next
 * close_tmpfile2() call.
 */
void
set_tmpfile2(FILE *fptr)
{
	if (pc->tmpfile2)
		error(FATAL, "secondary temporary file already in use\n");

	pc->tmpfile2 = fptr;
}

/* Flag bits shared by cmd_mount() and show_mounts(). */
#define MOUNT_PRINT_INODES 0x1
#define MOUNT_PRINT_FILES 0x2

/*
 * Display basic information about the currently mounted filesystems.
 * The -f option lists the open files for the filesystem(s).
 * The -i option dumps the dirty inodes of the filesystem(s).
 * If an inode address, mount, vfsmount, superblock, device name or
 * directory name is also entered, just show the data for the
 * filesystem indicated by the argument.
*/ static char mount_hdr[BUFSIZE] = { 0 }; void cmd_mount(void) { int i; int c, found; struct task_context *tc, *namespace_context; ulong value1, value2; char *spec_string; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS*2]; ulong vfsmount = 0; int flags = 0; int save_next; ulong pid; /* find a context */ pid = 1; while ((namespace_context = pid_to_context(pid)) == NULL) pid++; while ((c = getopt(argcnt, args, "ifn:")) != EOF) { switch(c) { case 'i': if (INVALID_MEMBER(super_block_s_dirty)) { error(INFO, "the super_block.s_dirty linked list does " "not exist in this kernel\n"); option_not_supported(c); } flags |= MOUNT_PRINT_INODES; break; case 'f': flags |= MOUNT_PRINT_FILES; break; case 'n': switch (str_to_context(optarg, &value1, &tc)) { case STR_PID: case STR_TASK: namespace_context = tc; break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); break; } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] == 0) { show_mounts(0, flags, namespace_context); return; } /* * Dump everything into a tmpfile, and then walk * through it for each search argument entered. */ open_tmpfile(); show_mounts(0, MOUNT_PRINT_FILES | (VALID_MEMBER(super_block_s_dirty) ? MOUNT_PRINT_INODES : 0), namespace_context); pc->curcmd_flags &= ~HEADER_PRINTED; do { spec_string = args[optind]; if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0)) shift_string_left(spec_string, 2); found = FALSE; rewind(pc->tmpfile); save_next = 0; while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf1, mount_hdr)) { save_next = TRUE; continue; } if (save_next) { strcpy(buf2, buf1); save_next = FALSE; } if (!(c = parse_line(buf1, arglist))) continue; for (i = 0; i < c; i++) { if (PATHEQ(arglist[i], spec_string)) found = TRUE; /* * Check for a vfsmount address * embedded in a struct mount. 
*/ if ((i == 0) && (c == 5) && VALID_MEMBER(mount_mnt) && hexadecimal(spec_string, 0) && hexadecimal(arglist[i], 0)) { value1 = htol(spec_string, FAULT_ON_ERROR, NULL); value2 = htol(arglist[i], FAULT_ON_ERROR, NULL) + OFFSET(mount_mnt); if (value1 == value2) found = TRUE; } } if (found) { fp = pc->saved_fp; if (flags) { sscanf(buf2,"%lx", &vfsmount); show_mounts(vfsmount, flags, namespace_context); } else { if (!(pc->curcmd_flags & HEADER_PRINTED)) { fprintf(fp, "%s", mount_hdr); pc->curcmd_flags |= HEADER_PRINTED; } fprintf(fp, "%s", buf2); } found = FALSE; fp = pc->tmpfile; } } } while (args[++optind]); close_tmpfile(); } /* * Do the work for cmd_mount(); */ static void show_mounts(ulong one_vfsmount, int flags, struct task_context *namespace_context) { ulong one_vfsmount_list; long sb_s_files; long s_dirty; ulong devp, dirp, sbp, dirty, type, name; struct list_data list_data, *ld; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; ulong *dentry_list, *dp, *mntlist; ulong *vfsmnt; char *vfsmount_buf, *super_block_buf, *mount_buf; ulong dentry, inode, inode_sb, mnt_parent; char *dentry_buf, *inode_buf; int cnt, i, m, files_header_printed; int mount_cnt; int devlen; char mount_files_header[BUFSIZE]; long per_cpu_s_files; sprintf(mount_files_header, "%s%s%s%sTYPE%sPATH\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"), space(MINSPACE), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); dirp = dentry = mnt_parent = sb_s_files = s_dirty = 0; if (VALID_MEMBER(super_block_s_dirty)) s_dirty = OFFSET(super_block_s_dirty); per_cpu_s_files = MEMBER_EXISTS("file", "f_sb_list_cpu"); dentry_list = NULL; mntlist = 0; ld = &list_data; if (one_vfsmount) { one_vfsmount_list = one_vfsmount; mount_cnt = 1; mntlist = &one_vfsmount_list; } else mntlist = get_mount_list(&mount_cnt, namespace_context); devlen = strlen("DEVNAME")+2; if (!strlen(mount_hdr)) { snprintf(mount_hdr, sizeof(mount_hdr), "%s %s %s %s 
DIRNAME\n", mkstring(buf1, VADDR_PRLEN, CENTER, VALID_STRUCT(mount) ? "MOUNT" : "VFSMOUNT"), mkstring(buf2, VADDR_PRLEN, CENTER, "SUPERBLK"), mkstring(buf3, strlen("rootfs"), LJUST, "TYPE"), mkstring(buf4, devlen, LJUST, "DEVNAME")); } if (flags == 0) fprintf(fp, "%s", mount_hdr); sb_s_files = VALID_MEMBER(super_block_s_files) ? OFFSET(super_block_s_files) : INVALID_OFFSET; if ((flags & MOUNT_PRINT_FILES) && (sb_s_files == INVALID_OFFSET)) { /* * super_block.s_files deprecated */ if (!kernel_symbol_exists("inuse_filps")) { error(INFO, "the super_block.s_files linked list does " "not exist in this kernel\n"); option_not_supported('f'); } /* * No open files list in super_block (2.2). * Use inuse_filps list instead. */ dentry_list = create_dentry_array(symbol_value("inuse_filps"), &cnt); } if (VALID_STRUCT(mount)) { mount_buf = GETBUF(SIZE(mount)); vfsmount_buf = mount_buf + OFFSET(mount_mnt); } else { mount_buf = NULL; vfsmount_buf = GETBUF(SIZE(vfsmount)); } super_block_buf = GETBUF(SIZE(super_block)); for (m = 0, vfsmnt = mntlist; m < mount_cnt; m++, vfsmnt++) { if (VALID_STRUCT(mount)) { readmem(*vfsmnt, KVADDR, mount_buf, SIZE(mount), "mount buffer", FAULT_ON_ERROR); devp = ULONG(mount_buf + OFFSET(mount_mnt_devname)); } else { readmem(*vfsmnt, KVADDR, vfsmount_buf, SIZE(vfsmount), "vfsmount buffer", FAULT_ON_ERROR); devp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_devname)); } if (VALID_MEMBER(vfsmount_mnt_dirname)) { dirp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_dirname)); } else { if (VALID_STRUCT(mount)) { mnt_parent = ULONG(mount_buf + OFFSET(mount_mnt_parent)); dentry = ULONG(mount_buf + OFFSET(mount_mnt_mountpoint)); } else { mnt_parent = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_parent)); dentry = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_mountpoint)); } } sbp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb)); if (flags) fprintf(fp, "%s", mount_hdr); fprintf(fp, "%s %s ", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(*vfsmnt)), mkstring(buf2, 
VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(sbp))); readmem(sbp, KVADDR, super_block_buf, SIZE(super_block), "super_block buffer", FAULT_ON_ERROR); type = ULONG(super_block_buf + OFFSET(super_block_s_type)); readmem(type + OFFSET(file_system_type_name), KVADDR, &name, sizeof(void *), "file_system_type name", FAULT_ON_ERROR); if (read_string(name, buf1, BUFSIZE-1)) sprintf(buf3, "%-6s ", buf1); else sprintf(buf3, "unknown "); if (read_string(devp, buf1, BUFSIZE-1)) sprintf(buf4, "%s ", mkstring(buf2, devlen, LJUST, buf1)); else sprintf(buf4, "%s ", mkstring(buf2, devlen, LJUST, "(unknown)")); sprintf(buf1, "%s%s", buf3, buf4); while ((strlen(buf1) > 17) && (buf1[strlen(buf1)-2] == ' ')) strip_ending_char(buf1, ' '); fprintf(fp, "%s", buf1); if (VALID_MEMBER(vfsmount_mnt_dirname)) { if (read_string(dirp, buf1, BUFSIZE-1)) fprintf(fp, "%-10s\n", buf1); else fprintf(fp, "%-10s\n", "(unknown)"); } else { get_pathname(dentry, buf1, BUFSIZE, 1, VALID_STRUCT(mount) ? mnt_parent + OFFSET(mount_mnt) : mnt_parent); fprintf(fp, "%-10s\n", buf1); } if (flags & MOUNT_PRINT_FILES) { if (sb_s_files != INVALID_OFFSET) { dentry_list = per_cpu_s_files ? create_dentry_array_percpu(sbp+ sb_s_files, &cnt) : create_dentry_array(sbp+sb_s_files, &cnt); } files_header_printed = 0; for (i=0, dp = dentry_list; iflags = VERBOSE; ld->start = dirty; ld->end = (sbp+s_dirty); ld->header = "DIRTY INODES\n"; hq_open(); do_list(ld); hq_close(); } else { fprintf(fp, "DIRTY INODES\nNo dirty inodes found\n"); } } if (flags && !one_vfsmount) fprintf(fp, "\n"); } if (!one_vfsmount) FREEBUF(mntlist); if (VALID_STRUCT(mount)) FREEBUF(mount_buf); else FREEBUF(vfsmount_buf); FREEBUF(super_block_buf); } /* * Allocate and fill a list of the currently-mounted vfsmount pointers. 
*/
/*
 * Build an array of the mount-list entry addresses (vfsmount addresses, or
 * struct mount addresses on kernels that have that structure) for the kernel
 * being analyzed, handling the three layouts seen over kernel history:
 *
 *   1. a global "vfsmntlist" list (oldest kernels)
 *   2. task_struct.nsproxy -> mnt_ns (mnt_namespace root/list)
 *   3. task_struct.namespace (namespace root/list)
 *
 * The list is walked with do_list() using LIST_ALLOCATE, so the returned
 * array is GETBUF-allocated and must be FREEBUF'd by the caller.  The entry
 * count is returned through cntptr.  namespace_context supplies the task
 * whose mount namespace is used for layouts 2 and 3.
 */
static ulong *
get_mount_list(int *cntptr, struct task_context *namespace_context)
{
	struct list_data list_data, *ld;
	ulong namespace, root, nsproxy, mnt_ns;
	struct task_context *tc;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	ld->flags |= LIST_ALLOCATE;	/* do_list() allocates ld->list_ptr */

	if (symbol_exists("vfsmntlist")) {
		/* Oldest layout: a simple global list of vfsmounts. */
		get_symbol_data("vfsmntlist", sizeof(void *), &ld->start);
		ld->end = symbol_value("vfsmntlist");
	} else if (VALID_MEMBER(task_struct_nsproxy)) {
		/* Modern layout: task->nsproxy->mnt_ns holds the namespace. */
		tc = namespace_context;
		readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR,
			&nsproxy, sizeof(void *), "task nsproxy",
			FAULT_ON_ERROR);
		if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR,
		    &mnt_ns, sizeof(void *), "nsproxy mnt_ns",
		    RETURN_ON_ERROR|QUIET))
			error(FATAL,
			    "cannot determine mount list location!\n");
		if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR,
		    &root, sizeof(void *), "mnt_namespace root",
		    RETURN_ON_ERROR|QUIET))
			error(FATAL,
			    "cannot determine mount list location!\n");
		ld->start = root +
			OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list);
		ld->end = mnt_ns + OFFSET(mnt_namespace_list);
	} else if (VALID_MEMBER(namespace_root)) {
		/* Interim layout: task->namespace. */
		tc = namespace_context;
		readmem(tc->task + OFFSET(task_struct_namespace), KVADDR,
			&namespace, sizeof(void *), "task namespace",
			FAULT_ON_ERROR);
		if (!readmem(namespace + OFFSET(namespace_root), KVADDR,
		    &root, sizeof(void *), "namespace root",
		    RETURN_ON_ERROR|QUIET))
			error(FATAL,
			    "cannot determine mount list location!\n");
		if (CRASHDEBUG(1))
			console("namespace: %lx => root: %lx\n",
				namespace, root);
		ld->start = root +
			OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list);
		ld->end = namespace + OFFSET(namespace_list);
	} else
		error(FATAL, "cannot determine mount list location!\n");

	/*
	 * The list may be linked through vfsmount.mnt_list, mount.mnt_list,
	 * or (2.2 era) the vfsmount.mnt_next singly-linked pointer.
	 */
	if (VALID_MEMBER(vfsmount_mnt_list))
		ld->list_head_offset = OFFSET(vfsmount_mnt_list);
	else if (VALID_STRUCT(mount))
		ld->list_head_offset = OFFSET(mount_mnt_list);
	else
		ld->member_offset = OFFSET(vfsmount_mnt_next);

	*cntptr = do_list(ld);
	return(ld->list_ptr);
}

/*
 * Given a dentry, display its address,
inode, super_block, pathname.
 */
static void
display_dentry_info(ulong dentry)
{
	int m, found;
	char *dentry_buf, *inode_buf, *vfsmount_buf, *mount_buf;
	ulong inode, superblock, sb, vfs;
	ulong *mntlist, *vfsmnt;
	char pathname[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	int mount_cnt;

	/* Column headers: DENTRY INODE SUPERBLK TYPE PATH */
	fprintf(fp, "%s%s%s%s%s%sTYPE%sPATH\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "SUPERBLK"),
		space(MINSPACE), space(MINSPACE));

	dentry_buf = fill_dentry_cache(dentry);
	inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
	pathname[0] = NULLCHAR;

	if (inode) {
		inode_buf = fill_inode_cache(inode);
		superblock = ULONG(inode_buf + OFFSET(inode_i_sb));
	} else {
		inode_buf = NULL;
		superblock = 0;
	}

	/* Without both an inode and superblock no pathname can be built. */
	if (!inode || !superblock)
		goto nopath;

	if (VALID_MEMBER(file_f_vfsmnt)) {
		/*
		 * Search the mount list for the filesystem whose superblock
		 * matches this dentry's, so get_pathname() can resolve the
		 * path up through the mount point.  pid 1 supplies the
		 * reference mount namespace.
		 */
		mntlist = get_mount_list(&mount_cnt, pid_to_context(1));

		if (VALID_STRUCT(mount)) {
			/* struct mount embeds struct vfsmount at mount_mnt */
			mount_buf = GETBUF(SIZE(mount));
			vfsmount_buf = mount_buf + OFFSET(mount_mnt);
		} else {
			mount_buf = NULL;
			vfsmount_buf = GETBUF(SIZE(vfsmount));
		}

		for (m = found = 0, vfsmnt = mntlist; m < mount_cnt;
		     m++, vfsmnt++) {
			if (VALID_STRUCT(mount))
				readmem(*vfsmnt, KVADDR, mount_buf,
					SIZE(mount), "mount buffer",
					FAULT_ON_ERROR);
			else
				readmem(*vfsmnt, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1,
					VALID_STRUCT(mount) ?
					*vfsmnt+OFFSET(mount_mnt) : *vfsmnt);
				found = TRUE;
			}
		}

		/* Not on a listed mount: try the internal pipe mount. */
		if (!found && symbol_exists("pipe_mnt")) {
			get_symbol_data("pipe_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1,
					vfs);
				found = TRUE;
			}
		}

		/* ...and the internal socket mount. */
		if (!found && symbol_exists("sock_mnt")) {
			get_symbol_data("sock_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1,
					vfs);
				found = TRUE;
			}
		}
	} else {
		/* No file.f_vfsmnt in this kernel: resolve without a mount. */
		mntlist = 0;
		get_pathname(dentry, pathname, BUFSIZE, 1, 0);
	}

	if (mntlist) {
		FREEBUF(mntlist);
		if (VALID_STRUCT(mount))
			FREEBUF(mount_buf);
		else
			FREEBUF(vfsmount_buf);
	}

nopath:
	fprintf(fp, "%s%s%s%s%s%s%s%s%s\n",
		mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(dentry)),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(inode)),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX,
		MKSTR(superblock)),
		space(MINSPACE),
		inode ? inode_type(inode_buf, pathname) : "N/A",
		space(MINSPACE), pathname);
}

/*
 * Return a 4-character type string of an inode, modifying a previously
 * gathered pathname if necessary.
*/
char *
inode_type(char *inode_buf, char *pathname)
{
	char *type;
	uint32_t umode32;
	uint16_t umode16;
	uint mode;
	ulong inode_i_op;
	ulong inode_i_fop;
	long i_fop_off;

	/* umode_t has been both 16 and 32 bits; read whichever this is. */
	mode = umode16 = umode32 = 0;
	switch (SIZE(umode_t))
	{
	case SIZEOF_32BIT:
		umode32 = UINT(inode_buf + OFFSET(inode_i_mode));
		mode = umode32;
		break;

	case SIZEOF_16BIT:
		umode16 = USHORT(inode_buf + OFFSET(inode_i_mode));
		mode = (uint)umode16;
		break;
	}

	/* The S_IS* tests are mutually exclusive, so order is immaterial. */
	type = "UNKN";
	if (S_ISREG(mode))
		type = "REG ";
	if (S_ISLNK(mode))
		type = "LNK ";
	if (S_ISDIR(mode))
		type = "DIR ";
	if (S_ISCHR(mode))
		type = "CHR ";
	if (S_ISBLK(mode))
		type = "BLK ";
	if (S_ISFIFO(mode)) {
		type = "FIFO";
		/*
		 * Distinguish an anonymous kernel pipe from a named FIFO by
		 * its inode/file operations; a pipe gets no pathname.
		 */
		if (symbol_exists("pipe_inode_operations")) {
			inode_i_op = ULONG(inode_buf + OFFSET(inode_i_op));
			if (inode_i_op ==
			    symbol_value("pipe_inode_operations")) {
				type = "PIPE";
				pathname[0] = NULLCHAR;
			}
		} else {
			if (symbol_exists("rdwr_pipe_fops") &&
			    (i_fop_off = OFFSET(inode_i_fop)) > 0) {
				inode_i_fop = ULONG(inode_buf + i_fop_off);
				if (inode_i_fop ==
				    symbol_value("rdwr_pipe_fops")) {
					type = "PIPE";
					pathname[0] = NULLCHAR;
				}
			}
		}
	}
	if (S_ISSOCK(mode)) {
		type = "SOCK";
		/* sockets show up with a "/" pathname; suppress it */
		if (STREQ(pathname, "/"))
			pathname[0] = NULLCHAR;
	}

	return type;
}

/*
 * Walk an open file list and return an array of open dentries.
 */
static ulong *
create_dentry_array(ulong list_addr, int *count)
{
	struct list_data list_data, *ld;
	ulong *file, *files_list, *dentry_list;
	ulong dentry, inode;
	char *file_buf, *dentry_buf;
	int cnt, f_count, i;
	int dentry_cnt = 0;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));

	readmem(list_addr, KVADDR, &ld->start, sizeof(void *),
		"file list head", FAULT_ON_ERROR);

	if (list_addr == ld->start) { /* empty list?
*/ *count = 0; return NULL; } ld->end = list_addr; hq_open(); cnt = do_list(ld); if (cnt == 0) { hq_close(); *count = 0; return NULL; } files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); hq_open(); for (i=0, file = files_list; i__per_cpu_offset[c]; percpu_list[c].dentry_list = create_dentry_array(list_addr, &percpu_list[c].count); total += percpu_list[c].count; } if (total) { dentry_list = (ulong *)GETBUF(total * sizeof(ulong)); for (c = i = 0; c < (cpu+1); c++) { if (percpu_list[c].count == 0) continue; for (j = 0; j < percpu_list[c].count; j++) dentry_list[i++] = percpu_list[c].dentry_list[j]; FREEBUF(percpu_list[c].dentry_list); } } else dentry_list = NULL; FREEBUF(percpu_list); *count = total; return dentry_list; } /* * Stash vfs structure offsets */ void vfs_init(void) { MEMBER_OFFSET_INIT(nlm_file_f_file, "nlm_file", "f_file"); MEMBER_OFFSET_INIT(task_struct_files, "task_struct", "files"); MEMBER_OFFSET_INIT(task_struct_fs, "task_struct", "fs"); MEMBER_OFFSET_INIT(fs_struct_root, "fs_struct", "root"); MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); MEMBER_OFFSET_INIT(files_struct_open_fds_init, "files_struct", "open_fds_init"); MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt"); if (VALID_MEMBER(files_struct_fdt)) { MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds"); MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset"); MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds"); MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd"); } else { MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); } MEMBER_OFFSET_INIT(file_f_dentry, "file", 
"f_dentry"); MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt"); MEMBER_OFFSET_INIT(file_f_count, "file", "f_count"); MEMBER_OFFSET_INIT(path_mnt, "path", "mnt"); MEMBER_OFFSET_INIT(path_dentry, "path", "dentry"); if (INVALID_MEMBER(file_f_dentry)) { MEMBER_OFFSET_INIT(file_f_path, "file", "f_path"); ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry); ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt); } MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode"); MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent"); MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers"); MEMBER_OFFSET_INIT(dentry_d_name, "dentry", "d_name"); MEMBER_OFFSET_INIT(dentry_d_iname, "dentry", "d_iname"); MEMBER_OFFSET_INIT(inode_i_mode, "inode", "i_mode"); MEMBER_OFFSET_INIT(inode_i_op, "inode", "i_op"); MEMBER_OFFSET_INIT(inode_i_sb, "inode", "i_sb"); MEMBER_OFFSET_INIT(inode_u, "inode", "u"); MEMBER_OFFSET_INIT(qstr_name, "qstr", "name"); MEMBER_OFFSET_INIT(qstr_len, "qstr", "len"); if (INVALID_MEMBER(qstr_len)) ANON_MEMBER_OFFSET_INIT(qstr_len, "qstr", "len"); MEMBER_OFFSET_INIT(vfsmount_mnt_next, "vfsmount", "mnt_next"); MEMBER_OFFSET_INIT(vfsmount_mnt_devname, "vfsmount", "mnt_devname"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_devname, "mount", "mnt_devname"); MEMBER_OFFSET_INIT(vfsmount_mnt_dirname, "vfsmount", "mnt_dirname"); MEMBER_OFFSET_INIT(vfsmount_mnt_sb, "vfsmount", "mnt_sb"); MEMBER_OFFSET_INIT(vfsmount_mnt_list, "vfsmount", "mnt_list"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_list, "mount", "mnt_list"); MEMBER_OFFSET_INIT(vfsmount_mnt_parent, "vfsmount", "mnt_parent"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_parent, "mount", "mnt_parent"); MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, "vfsmount", "mnt_mountpoint"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_mountpoint, "mount", "mnt_mountpoint"); 
MEMBER_OFFSET_INIT(mount_mnt, "mount", "mnt"); MEMBER_OFFSET_INIT(namespace_root, "namespace", "root"); MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy"); if (VALID_MEMBER(namespace_root)) { MEMBER_OFFSET_INIT(namespace_list, "namespace", "list"); MEMBER_OFFSET_INIT(task_struct_namespace, "task_struct", "namespace"); } else if (VALID_MEMBER(task_struct_nsproxy)) { MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns"); MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root"); MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list"); } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) { if (CRASHDEBUG(2)) fprintf(fp, "hardwiring namespace stuff\n"); ASSIGN_OFFSET(task_struct_namespace) = OFFSET(task_struct_files) + sizeof(void *); ASSIGN_OFFSET(namespace_root) = sizeof(void *); ASSIGN_OFFSET(namespace_list) = sizeof(void *) * 2; } MEMBER_OFFSET_INIT(super_block_s_dirty, "super_block", "s_dirty"); MEMBER_OFFSET_INIT(super_block_s_type, "super_block", "s_type"); MEMBER_OFFSET_INIT(file_system_type_name, "file_system_type", "name"); MEMBER_OFFSET_INIT(super_block_s_files, "super_block", "s_files"); MEMBER_OFFSET_INIT(inode_i_flock, "inode", "i_flock"); MEMBER_OFFSET_INIT(file_lock_fl_owner, "file_lock", "fl_owner"); MEMBER_OFFSET_INIT(nlm_host_h_exportent, "nlm_host", "h_exportent"); MEMBER_OFFSET_INIT(svc_client_cl_ident, "svc_client", "cl_ident"); MEMBER_OFFSET_INIT(inode_i_fop, "inode","i_fop"); STRUCT_SIZE_INIT(umode_t, "umode_t"); STRUCT_SIZE_INIT(dentry, "dentry"); STRUCT_SIZE_INIT(files_struct, "files_struct"); if (VALID_MEMBER(files_struct_fdt)) STRUCT_SIZE_INIT(fdtable, "fdtable"); STRUCT_SIZE_INIT(file, "file"); STRUCT_SIZE_INIT(inode, "inode"); STRUCT_SIZE_INIT(mount, "mount"); STRUCT_SIZE_INIT(vfsmount, "vfsmount"); STRUCT_SIZE_INIT(fs_struct, "fs_struct"); STRUCT_SIZE_INIT(super_block, "super_block"); if (!(ft->file_cache = (char *)malloc(SIZE(file)*FILE_CACHE))) error(FATAL, "cannot malloc file cache\n"); if (!(ft->dentry_cache 
= (char *)malloc(SIZE(dentry)*DENTRY_CACHE))) error(FATAL, "cannot malloc dentry cache\n"); if (!(ft->inode_cache = (char *)malloc(SIZE(inode)*INODE_CACHE))) error(FATAL, "cannot malloc inode cache\n"); if (symbol_exists("height_to_maxindex")) { int tmp ATTRIBUTE_UNUSED; if (LKCD_KERNTYPES()) ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex", "radix_tree_preload.nodes", NULL, 0); else ARRAY_LENGTH_INIT(tmp, height_to_maxindex, "height_to_maxindex", NULL, 0); STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root"); STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node"); MEMBER_OFFSET_INIT(radix_tree_root_height, "radix_tree_root","height"); MEMBER_OFFSET_INIT(radix_tree_root_rnode, "radix_tree_root","rnode"); MEMBER_OFFSET_INIT(radix_tree_node_slots, "radix_tree_node","slots"); MEMBER_OFFSET_INIT(radix_tree_node_height, "radix_tree_node","height"); } MEMBER_OFFSET_INIT(rb_root_rb_node, "rb_root","rb_node"); MEMBER_OFFSET_INIT(rb_node_rb_left, "rb_node","rb_left"); MEMBER_OFFSET_INIT(rb_node_rb_right, "rb_node","rb_right"); } void dump_filesys_table(int verbose) { int i; ulong fhits, dhits, ihits; if (!verbose) goto show_hit_rates; for (i = 0; i < FILE_CACHE; i++) fprintf(fp, " cached_file[%2d]: %lx (%ld)\n", i, ft->cached_file[i], ft->cached_file_hits[i]); fprintf(fp, " file_cache: %lx\n", (ulong)ft->file_cache); fprintf(fp, " file_cache_index: %d\n", ft->file_cache_index); fprintf(fp, " file_cache_fills: %ld\n", ft->file_cache_fills); for (i = 0; i < DENTRY_CACHE; i++) fprintf(fp, " cached_dentry[%2d]: %lx (%ld)\n", i, ft->cached_dentry[i], ft->cached_dentry_hits[i]); fprintf(fp, " dentry_cache: %lx\n", (ulong)ft->dentry_cache); fprintf(fp, "dentry_cache_index: %d\n", ft->dentry_cache_index); fprintf(fp, "dentry_cache_fills: %ld\n", ft->dentry_cache_fills); for (i = 0; i < INODE_CACHE; i++) fprintf(fp, " cached_inode[%2d]: %lx (%ld)\n", i, ft->cached_inode[i], ft->cached_inode_hits[i]); fprintf(fp, " inode_cache: %lx\n", (ulong)ft->inode_cache); fprintf(fp, " 
inode_cache_index: %d\n", ft->inode_cache_index); fprintf(fp, " inode_cache_fills: %ld\n", ft->inode_cache_fills); show_hit_rates: if (ft->file_cache_fills) { for (i = fhits = 0; i < FILE_CACHE; i++) fhits += ft->cached_file_hits[i]; fprintf(fp, " file hit rate: %2ld%% (%ld of %ld)\n", (fhits * 100)/ft->file_cache_fills, fhits, ft->file_cache_fills); } if (ft->dentry_cache_fills) { for (i = dhits = 0; i < DENTRY_CACHE; i++) dhits += ft->cached_dentry_hits[i]; fprintf(fp, " dentry hit rate: %2ld%% (%ld of %ld)\n", (dhits * 100)/ft->dentry_cache_fills, dhits, ft->dentry_cache_fills); } if (ft->inode_cache_fills) { for (i = ihits = 0; i < INODE_CACHE; i++) ihits += ft->cached_inode_hits[i]; fprintf(fp, " inode hit rate: %2ld%% (%ld of %ld)\n", (ihits * 100)/ft->inode_cache_fills, ihits, ft->inode_cache_fills); } } /* * Get the page count for the specific mapping */ static long get_inode_nrpages(ulong i_mapping) { char *address_space_buf; ulong nrpages; address_space_buf = GETBUF(SIZE(address_space)); readmem(i_mapping, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages = ULONG(address_space_buf + OFFSET(address_space_nrpages)); FREEBUF(address_space_buf); return nrpages; } static void dump_inode_page_cache_info(ulong inode) { char *inode_buf; ulong i_mapping, nrpages, root_rnode, count; struct radix_tree_pair rtp; char header[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; inode_buf = GETBUF(SIZE(inode)); readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); i_mapping = ULONG(inode_buf + OFFSET(inode_i_mapping)); nrpages = get_inode_nrpages(i_mapping); sprintf(header, "%s NRPAGES\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE")); fprintf(fp, "%s", header); fprintf(fp, "%s %s\n\n", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(inode)), mkstring(buf2, strlen("NRPAGES"), RJUST|LONG_DEC, MKSTR(nrpages))); root_rnode = i_mapping + OFFSET(address_space_page_tree); rtp.index = 0; 
rtp.value = (void *)&dump_inode_page; count = do_radix_tree(root_rnode, RADIX_TREE_DUMP_CB, &rtp); if (count != nrpages) error(INFO, "page_tree count: %ld nrpages: %ld\n", count, nrpages); FREEBUF(inode_buf); return; } /* * This command displays information about the open files of a context. * For each open file descriptor the file descriptor number, a pointer * to the file struct, pointer to the dentry struct, pointer to the inode * struct, indication of file type and pathname are printed. * The argument can be a task address or a PID number; if no args, the * current context is used. * If the flag -l is passed, any files held open in the kernel by the * lockd server on behalf of an NFS client are displayed. */ void cmd_files(void) { int c; ulong value; struct task_context *tc; int subsequent; struct reference reference, *ref; char *refarg; int open_flags = 0; ref = NULL; refarg = NULL; while ((c = getopt(argcnt, args, "d:R:p:c")) != EOF) { switch(c) { case 'R': if (ref) { error(INFO, "only one -R option allowed\n"); argerrs++; } else { ref = &reference; BZERO(ref, sizeof(struct reference)); ref->str = refarg = optarg; } break; case 'd': value = htol(optarg, FAULT_ON_ERROR, NULL); display_dentry_info(value); return; case 'p': if (VALID_MEMBER(address_space_page_tree) && VALID_MEMBER(inode_i_mapping)) { value = htol(optarg, FAULT_ON_ERROR, NULL); dump_inode_page_cache_info(value); } else option_not_supported('p'); return; case 'c': if (VALID_MEMBER(address_space_page_tree) && VALID_MEMBER(inode_i_mapping)) open_flags |= PRINT_NRPAGES; else option_not_supported('c'); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!args[optind]) { if (!ref) print_task_header(fp, CURRENT_CONTEXT(), 0); open_files_dump(CURRENT_TASK(), open_flags, ref); return; } subsequent = 0; while (args[optind]) { if (ref && subsequent) { BZERO(ref, sizeof(struct reference)); ref->str = refarg; } switch (str_to_context(args[optind], &value, &tc)) { case 
STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (!ref) print_task_header(fp, tc, subsequent); open_files_dump(tc->task, open_flags, ref); fprintf(fp, "\n"); } break; case STR_TASK: if (!ref) print_task_header(fp, tc, subsequent); open_files_dump(tc->task, open_flags, ref); break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n", args[optind]); break; } subsequent++; optind++; } } #define FILES_REF_HEXNUM (0x1) #define FILES_REF_DECNUM (0x2) #define FILES_REF_FOUND (0x4) #define PRINT_FILE_REFERENCE() \ if (!root_pwd_printed) { \ print_task_header(fp, tc, 0); \ fprintf(fp, "%s", root_pwd); \ root_pwd_printed = TRUE; \ } \ if (!header_printed) { \ fprintf(fp, "%s", files_header);\ header_printed = TRUE; \ } \ fprintf(fp, "%s", buf4); \ ref->cmdflags |= FILES_REF_FOUND; #define FILENAME_COMPONENT(P,C) \ ((STREQ((P), "/") && STREQ((C), "/")) || \ (!STREQ((C), "/") && strstr((P),(C)))) /* * open_files_dump() does the work for cmd_files(). */ void open_files_dump(ulong task, int flags, struct reference *ref) { struct task_context *tc; ulong files_struct_addr; ulong fdtable_addr = 0; char *files_struct_buf, *fdtable_buf = NULL; ulong fs_struct_addr; char *dentry_buf, *fs_struct_buf; char *ret ATTRIBUTE_UNUSED; ulong root_dentry, pwd_dentry; ulong root_inode, pwd_inode; ulong vfsmnt; int max_fdset = 0; int max_fds = 0; ulong open_fds_addr; fd_set open_fds; ulong fd; ulong file; ulong value; int i, j, use_path; int header_printed = 0; char root_pathname[BUFSIZE]; char pwd_pathname[BUFSIZE]; char files_header[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char root_pwd[BUFSIZE]; int root_pwd_printed = 0; int file_dump_flags = 0; BZERO(root_pathname, BUFSIZE); BZERO(pwd_pathname, BUFSIZE); files_struct_buf = GETBUF(SIZE(files_struct)); if (VALID_STRUCT(fdtable)) fdtable_buf = GETBUF(SIZE(fdtable)); fill_task_struct(task); if (flags & PRINT_NRPAGES) { sprintf(files_header, " 
FD%s%s%s%s%sNRPAGES%sTYPE%sPATH\n", space(MINSPACE), mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), mkstring(buf2, MAX(VADDR_PRLEN, strlen("I_MAPPING")), BITS32() ? (CENTER|RJUST) : (CENTER|LJUST), "I_MAPPING"), space(MINSPACE), space(MINSPACE), space(MINSPACE)); } else { sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n", space(MINSPACE), mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "FILE"), space(MINSPACE), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "DENTRY"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); } tc = task_to_context(task); if (ref) ref->cmdflags = 0; fs_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_fs)); if (fs_struct_addr) { fs_struct_buf = GETBUF(SIZE(fs_struct)); readmem(fs_struct_addr, KVADDR, fs_struct_buf, SIZE(fs_struct), "fs_struct buffer", FAULT_ON_ERROR); use_path = (MEMBER_TYPE("fs_struct", "root") == TYPE_CODE_STRUCT); if (use_path) root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root) + OFFSET(path_dentry)); else root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root)); if (root_dentry) { if (VALID_MEMBER(fs_struct_rootmnt)) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_rootmnt)); get_pathname(root_dentry, root_pathname, BUFSIZE, 1, vfsmnt); } else if (use_path) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_root) + OFFSET(path_mnt)); get_pathname(root_dentry, root_pathname, BUFSIZE, 1, vfsmnt); } else { get_pathname(root_dentry, root_pathname, BUFSIZE, 1, 0); } } if (use_path) pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) + OFFSET(path_dentry)); else pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd)); if (pwd_dentry) { if (VALID_MEMBER(fs_struct_pwdmnt)) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_pwdmnt)); get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, vfsmnt); } else if (use_path) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) + OFFSET(path_mnt)); get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, 
vfsmnt); } else { get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, 0); } } if ((flags & PRINT_INODES) && root_dentry && pwd_dentry) { dentry_buf = fill_dentry_cache(root_dentry); root_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); dentry_buf = fill_dentry_cache(pwd_dentry); pwd_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); fprintf(fp, "ROOT: %lx %s CWD: %lx %s\n", root_inode, root_pathname, pwd_inode, pwd_pathname); } else if (ref) { snprintf(root_pwd, sizeof(root_pwd), "ROOT: %s CWD: %s \n", root_pathname, pwd_pathname); if (FILENAME_COMPONENT(root_pathname, ref->str) || FILENAME_COMPONENT(pwd_pathname, ref->str)) { print_task_header(fp, tc, 0); fprintf(fp, "%s", root_pwd); root_pwd_printed = TRUE; ref->cmdflags |= FILES_REF_FOUND; } } else fprintf(fp, "ROOT: %s CWD: %s\n", root_pathname, pwd_pathname); FREEBUF(fs_struct_buf); } files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files)); if (files_struct_addr) { readmem(files_struct_addr, KVADDR, files_struct_buf, SIZE(files_struct), "files_struct buffer", FAULT_ON_ERROR); if (VALID_MEMBER(files_struct_max_fdset)) { max_fdset = INT(files_struct_buf + OFFSET(files_struct_max_fdset)); max_fds = INT(files_struct_buf + OFFSET(files_struct_max_fds)); } } if (VALID_MEMBER(files_struct_fdt)) { fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); if (fdtable_addr) { readmem(fdtable_addr, KVADDR, fdtable_buf, SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); if (VALID_MEMBER(fdtable_max_fdset)) max_fdset = INT(fdtable_buf + OFFSET(fdtable_max_fdset)); else max_fdset = -1; max_fds = INT(fdtable_buf + OFFSET(fdtable_max_fds)); } } if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || !files_struct_addr || max_fdset == 0 || max_fds == 0) { if (ref) { if (ref->cmdflags & FILES_REF_FOUND) fprintf(fp, "\n"); } else fprintf(fp, "No open files\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); return; } if (ref && IS_A_NUMBER(ref->str)) { if (hexadecimal_only(ref->str, 
0)) { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= FILES_REF_HEXNUM; } else { value = dtol(ref->str, FAULT_ON_ERROR, NULL); if (value <= MAX(max_fdset, max_fds)) { ref->decval = value; ref->cmdflags |= FILES_REF_DECNUM; } else { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= FILES_REF_HEXNUM; } } } if (VALID_MEMBER(fdtable_open_fds)) open_fds_addr = ULONG(fdtable_buf + OFFSET(fdtable_open_fds)); else open_fds_addr = ULONG(files_struct_buf + OFFSET(files_struct_open_fds)); if (open_fds_addr) { if (VALID_MEMBER(files_struct_open_fds_init) && (open_fds_addr == (files_struct_addr + OFFSET(files_struct_open_fds_init)))) BCOPY(files_struct_buf + OFFSET(files_struct_open_fds_init), &open_fds, sizeof(fd_set)); else readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set), "fdtable open_fds", FAULT_ON_ERROR); } if (VALID_MEMBER(fdtable_fd)) fd = ULONG(fdtable_buf + OFFSET(fdtable_fd)); else fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); if (!open_fds_addr || !fd) { if (ref && (ref->cmdflags & FILES_REF_FOUND)) fprintf(fp, "\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); return; } file_dump_flags = DUMP_FULL_NAME | DUMP_EMPTY_FILE; if (flags & PRINT_NRPAGES) file_dump_flags |= DUMP_FILE_NRPAGES; j = 0; for (;;) { unsigned long set; i = j * __NFDBITS; if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds)) break; set = open_fds.__fds_bits[j++]; while (set) { if (set & 1) { readmem(fd + i*sizeof(struct file *), KVADDR, &file, sizeof(struct file *), "fd file", FAULT_ON_ERROR); if (ref && file) { open_tmpfile(); if (file_dump(file, 0, 0, i, file_dump_flags)) { BZERO(buf4, BUFSIZE); rewind(pc->tmpfile); ret = fgets(buf4, BUFSIZE, pc->tmpfile); close_tmpfile(); ref->refp = buf4; if (open_file_reference(ref)) { PRINT_FILE_REFERENCE(); } } else close_tmpfile(); } else if (file) { if (!header_printed) { fprintf(fp, "%s", files_header); header_printed = 1; } file_dump(file, 0, 0, i, file_dump_flags); 
} } i++; set >>= 1; } } if (!header_printed && !ref) fprintf(fp, "No open files\n"); if (ref && (ref->cmdflags & FILES_REF_FOUND)) fprintf(fp, "\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); } /* * Check an open file string for references. */ static int open_file_reference(struct reference *ref) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int i, fd, argcnt; ulong vaddr; strcpy(buf, ref->refp); if ((argcnt = parse_line(buf, arglist)) < 5) return FALSE; if (ref->cmdflags & (FILES_REF_HEXNUM|FILES_REF_DECNUM)) { fd = dtol(arglist[0], FAULT_ON_ERROR, NULL); if (((ref->cmdflags & FILES_REF_HEXNUM) && (fd == ref->hexval)) || ((ref->cmdflags & FILES_REF_DECNUM) && (fd == ref->decval))) { return TRUE; } for (i = 1; i < 4; i++) { if (STREQ(arglist[i], "?")) continue; vaddr = htol(arglist[i], FAULT_ON_ERROR, NULL); if (vaddr == ref->hexval) return TRUE; } } if (STREQ(ref->str, arglist[4])) { return TRUE; } if ((argcnt == 6) && FILENAME_COMPONENT(arglist[5], ref->str)) { return TRUE; } return FALSE; } #ifdef DEPRECATED /* * nlm_files_dump() prints files held open by lockd server on behalf * of NFS clients */ #define FILE_NRHASH 32 char nlm_files_header[BUFSIZE] = { 0 }; char *nlm_header = \ "Files open by lockd for client discretionary file locks:\n"; void nlm_files_dump(void) { int header_printed = 0; int i, j, cnt; ulong nlmsvc_ops, nlm_files; struct syment *nsp; ulong nlm_files_array[FILE_NRHASH]; struct list_data list_data, *ld; ulong *file, *files_list; ulong dentry, inode, flock, host, client; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!strlen(nlm_files_header)) { sprintf(nlm_files_header, "CLIENT %s %s%sTYPE%sPATH\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "NLM_FILE"), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); } if (!symbol_exists("nlm_files") || !symbol_exists("nlmsvc_ops") || !symbol_exists("nfsd_nlm_ops")) { goto out; } get_symbol_data("nlmsvc_ops", sizeof(void *), &nlmsvc_ops); if 
(nlmsvc_ops != symbol_value("nfsd_nlm_ops")) { goto out; } if ((nsp = next_symbol("nlm_files", NULL)) == NULL) { error(WARNING, "cannot find next symbol after nlm_files\n"); goto out; } nlm_files = symbol_value("nlm_files"); if (((nsp->value - nlm_files) / sizeof(void *)) != FILE_NRHASH ) { error(WARNING, "FILE_NRHASH has changed from %d\n", FILE_NRHASH); if (((nsp->value - nlm_files) / sizeof(void *)) < FILE_NRHASH ) goto out; } readmem(nlm_files, KVADDR, nlm_files_array, sizeof(ulong) * FILE_NRHASH, "nlm_files array", FAULT_ON_ERROR); for (i = 0; i < FILE_NRHASH; i++) { if (nlm_files_array[i] == 0) { continue; } ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = nlm_files_array[i]; hq_open(); cnt = do_list(ld); files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); for (j=0, file = files_list; j 1 || !STREQ(buf, "/")) && !STRNEQ(tmpname, "/")) { sprintf(pathname, "%s%s%s", buf, "/", tmpname); } else { sprintf(pathname, "%s%s", buf, tmpname); } } } else { strncpy(pathname, buf, BUFSIZE); } parent = ULONG(dentry_buf + OFFSET(dentry_d_parent)); if (tmp_dentry == parent && full) { if (VALID_MEMBER(vfsmount_mnt_mountpoint)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt, KVADDR, vfsmnt_buf, SIZE(vfsmount), "vfsmount buffer", FAULT_ON_ERROR); parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_mountpoint)); mnt_parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_parent)); if (tmp_vfsmnt == mnt_parent) break; else tmp_vfsmnt = mnt_parent; } } else if (VALID_STRUCT(mount)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt - OFFSET(mount_mnt), KVADDR, mnt_buf, SIZE(mount), "mount buffer", FAULT_ON_ERROR); parent = ULONG(mnt_buf + OFFSET(mount_mnt_mountpoint)); mnt_parent = ULONG(mnt_buf + OFFSET(mount_mnt_parent)); if ((tmp_vfsmnt - OFFSET(mount_mnt)) == mnt_parent) break; else tmp_vfsmnt = mnt_parent + 
OFFSET(mount_mnt); } } else { parent = ULONG(dentry_buf + OFFSET(dentry_d_covers)); } } } while (tmp_dentry != parent && parent); if (mnt_buf) FREEBUF(mnt_buf); else if (vfsmnt_buf) FREEBUF(vfsmnt_buf); } /* * If the pathname component, which may be internal or external to the * dentry, has string length equal to what's expected, copy it into the * passed-in buffer, and return its length. If it doesn't match, return 0. */ static int get_pathname_component(ulong dentry, ulong d_name_name, int d_name_len, char *dentry_buf, char *pathbuf) { int len = d_name_len; /* presume success */ if (d_name_name == (dentry + OFFSET(dentry_d_iname))) { if (strlen(dentry_buf + OFFSET(dentry_d_iname)) == d_name_len) strcpy(pathbuf, dentry_buf + OFFSET(dentry_d_iname)); else len = 0; } else if ((read_string(d_name_name, pathbuf, BUFSIZE)) != d_name_len) len = 0; return len; } /* * Cache the passed-in file structure. */ char * fill_file_cache(ulong file) { int i; char *cache; ft->file_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_file[i] == file) { ft->cached_file_hits[i]++; cache = ft->file_cache + (SIZE(file)*i); return(cache); } } cache = ft->file_cache + (SIZE(file)*ft->file_cache_index); readmem(file, KVADDR, cache, SIZE(file), "fill_file_cache", FAULT_ON_ERROR); ft->cached_file[ft->file_cache_index] = file; ft->file_cache_index = (ft->file_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the file references. */ void clear_file_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_file[i] = 0; ft->cached_file_hits[i] = 0; } ft->file_cache_fills = 0; ft->file_cache_index = 0; } /* * Cache the passed-in dentry structure. 
*/ char * fill_dentry_cache(ulong dentry) { int i; char *cache; ft->dentry_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_dentry[i] == dentry) { ft->cached_dentry_hits[i]++; cache = ft->dentry_cache + (SIZE(dentry)*i); return(cache); } } cache = ft->dentry_cache + (SIZE(dentry)*ft->dentry_cache_index); readmem(dentry, KVADDR, cache, SIZE(dentry), "fill_dentry_cache", FAULT_ON_ERROR); ft->cached_dentry[ft->dentry_cache_index] = dentry; ft->dentry_cache_index = (ft->dentry_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the dentry references. */ void clear_dentry_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_dentry[i] = 0; ft->cached_dentry_hits[i] = 0; } ft->dentry_cache_fills = 0; ft->dentry_cache_index = 0; } /* * Cache the passed-in inode structure. */ char * fill_inode_cache(ulong inode) { int i; char *cache; ft->inode_cache_fills++; for (i = 0; i < INODE_CACHE; i++) { if (ft->cached_inode[i] == inode) { ft->cached_inode_hits[i]++; cache = ft->inode_cache + (SIZE(inode)*i); return(cache); } } cache = ft->inode_cache + (SIZE(inode)*ft->inode_cache_index); readmem(inode, KVADDR, cache, SIZE(inode), "fill_inode_cache", FAULT_ON_ERROR); ft->cached_inode[ft->inode_cache_index] = inode; ft->inode_cache_index = (ft->inode_cache_index+1) % INODE_CACHE; return(cache); } /* * If active, clear the inode references. */ void clear_inode_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_inode[i] = 0; ft->cached_inode_hits[i] = 0; } ft->inode_cache_fills = 0; ft->inode_cache_index = 0; } /* * This command displays the tasks using specified files or sockets. * Tasks will be listed that reference the file as the current working * directory, root directory, an open file descriptor, or that mmap the * file. * The argument can be a full pathname without symbolic links, or inode * address. 
*/
void
cmd_fuser(void)
{
	int c;
	char *spec_string, *tmp;
	struct foreach_data foreach_data, *fd;
	char task_buf[BUFSIZE];
	char buf[BUFSIZE];
	char uses[20];
	char fuser_header[BUFSIZE];
	int doing_fds, doing_mmap, len;
	int fuser_header_printed, lockd_header_printed;

	/* No options accepted; anything other than plain arguments errors. */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (!args[optind]) {
		cmd_usage(pc->curcmd, SYNOPSIS);
		return;
	}

	sprintf(fuser_header, " PID %s COMM USAGE\n",
		mkstring(buf, VADDR_PRLEN, CENTER, "TASK"));

	doing_fds = doing_mmap = 0;

	/* Each remaining argument is a pathname or inode address to match. */
	while (args[optind]) {
		spec_string = args[optind];
		/* Normalize "0xADDR" to "ADDR" so it matches foreach output. */
		if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0))
			shift_string_left(spec_string, 2);
		len = strlen(spec_string);
		fuser_header_printed = 0;
		lockd_header_printed = 0;
		open_tmpfile();
		/*
		 * Run "foreach files" and "foreach vm" (with inode details)
		 * into the tmpfile, then grep that output for the spec.
		 */
		BZERO(&foreach_data, sizeof(struct foreach_data));
		fd = &foreach_data;
		fd->keyword_array[0] = FOREACH_FILES;
		fd->keyword_array[1] = FOREACH_VM;
		fd->keys = 2;
		fd->flags |= FOREACH_i_FLAG;
		foreach(fd);
		rewind(pc->tmpfile);
		BZERO(uses, 20);
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			/* A "PID:" line starts a new task: flush the
			   accumulated usage string for the previous one. */
			if (STRNEQ(buf, "PID:")) {
				if (!STREQ(uses, "")) {
					if (!fuser_header_printed) {
						fprintf(pc->saved_fp, "%s",
							fuser_header);
						fuser_header_printed = 1;
					}
					show_fuser(task_buf, uses);
					BZERO(uses, 20);
				}
				BZERO(task_buf, BUFSIZE);
				strcpy(task_buf, buf);
				doing_fds = doing_mmap = 0;
				continue;
			}
			/* The ROOT:/CWD: line: a match before "CWD:" is the
			   root directory, otherwise (or additionally) the
			   current working directory. */
			if (STRNEQ(buf, "ROOT:")) {
				if ((tmp = strstr(buf, spec_string)) &&
				    (tmp[len] == ' ' || tmp[len] == '\n')) {
					if (strstr(tmp, "CWD:")) {
						strcat(uses, "root ");
						if ((tmp = strstr(tmp+len,
						    spec_string)) &&
						    (tmp[len] == ' ' ||
						    tmp[len] == '\n')) {
							strcat(uses, "cwd ");
						}
					} else {
						strcat(uses, "cwd ");
					}
				}
				continue;
			}
			/* Column headers track which section of the foreach
			   output we are scanning. */
			if (strstr(buf, "DENTRY")) {
				doing_fds = 1;
				continue;
			}
			if (strstr(buf, "TOTAL_VM")) {
				doing_fds = 0;
				continue;
			}
			if (strstr(buf, " VMA ")) {
				doing_mmap = 1;
				doing_fds = 0;
				continue;
			}
			/* A match in the current section marks this task as
			   holding an fd on, or mmap'ing, the file. */
			if ((tmp = strstr(buf, spec_string)) &&
			    (tmp[len] == ' ' || tmp[len] == '\n')) {
				if (doing_fds) {
					strcat(uses, "fd ");
					doing_fds = 0;
				}
				if (doing_mmap) {
					strcat(uses, "mmap ");
					doing_mmap = 0;
				}
			}
		}
		/* Flush the final task's usage string, if any. */
		if (!STREQ(uses, "")) {
			if (!fuser_header_printed) {
				fprintf(pc->saved_fp, "%s", fuser_header);
				fuser_header_printed = 1;
			}
			show_fuser(task_buf, uses);
			BZERO(uses, 20);
		}
		close_tmpfile();
		optind++;
		if (!fuser_header_printed && !lockd_header_printed) {
			fprintf(fp, "No users of %s found\n", spec_string);
		}
	}
}

/*
 * Print one fuser output line.  buf is a saved "PID: ... TASK: ...
 * COMMAND: ..." line from foreach output; uses is the accumulated
 * space-separated usage string ("root ", "cwd ", "fd ", "mmap ").
 */
static void
show_fuser(char *buf, char *uses)
{
	char pid[10];
	char task[20];
	char command[20];
	char *p;
	int i;

	BZERO(pid, 10);
	BZERO(task, 20);
	BZERO(command, 20);

	/* Extract the PID token following "PID: ". */
	p = strstr(buf, "PID: ") + strlen("PID: ");
	i = 0;
	while (*p != ' ' && i < 10) {
		pid[i++] = *p++;
	}
	pid[i] = NULLCHAR;

	/* Extract the task address following "TASK: ". */
	p = strstr(buf, "TASK: ") + strlen("TASK: ");
	while (*p == ' ')
		p++;
	i = 0;
	while (*p != ' ' && i < 20) {
		task[i++] = *p++;
	}
	task[i] = NULLCHAR;
	mkstring(task, VADDR_PRLEN, RJUST, task);

	/* Extract the command name and pad it to a fixed 16 columns;
	   the strlen-1 start position overwrites the trailing newline. */
	p = strstr(buf, "COMMAND: ") + strlen("COMMAND: ");
	strncpy(command, p, 16);
	i = strlen(command) - 1;
	while (i < 16) {
		command[i++] = ' ';
	}
	command[16] = NULLCHAR;

	fprintf(pc->saved_fp, "%5s %s %s %s\n", pid, task, command, uses);
}

/*
 * Gather some host memory/swap statistics, passing back whatever the
 * caller requires.
*/ int monitor_memory(long *freemem_pages, long *freeswap_pages, long *mem_usage, long *swap_usage) { FILE *mp; char buf[BUFSIZE]; char *arglist[MAXARGS]; int argc ATTRIBUTE_UNUSED; int params; ulong freemem, memtotal, freeswap, swaptotal; if (!file_exists("/proc/meminfo", NULL)) return FALSE; if ((mp = fopen("/proc/meminfo", "r")) == NULL) return FALSE; params = 0; freemem = memtotal = freeswap = swaptotal = 0; while (fgets(buf, BUFSIZE, mp)) { if (strstr(buf, "SwapFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freeswap = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freemem = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) memtotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "SwapTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) swaptotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } } fclose(mp); if (params != 4) return FALSE; if (freemem_pages) *freemem_pages = freemem; if (freeswap_pages) *freeswap_pages = freeswap; if (mem_usage) *mem_usage = ((memtotal-freemem)*100) / memtotal; if (swap_usage) *swap_usage = ((swaptotal-freeswap)*100) / swaptotal; return TRUE; } /* * Determine whether two filenames reference the same file. */ int same_file(char *f1, char *f2) { struct stat stat1, stat2; if ((stat(f1, &stat1) != 0) || (stat(f2, &stat2) != 0)) return FALSE; if ((stat1.st_dev == stat2.st_dev) && (stat1.st_ino == stat2.st_ino)) return TRUE; return FALSE; } /* * Determine which live memory source to use. 
*/

#define MODPROBE_CMD "/sbin/modprobe -l --type drivers/char 2>&1"

/*
 * Select the live memory source: /dev/mem by default, the crash memory
 * driver module if available or requested, or the kernel's built-in
 * /dev/crash device.  Sets pc->flags, pc->live_memsrc and, for the
 * driver cases, the read/write function pointers.
 */
static void
get_live_memory_source(void)
{
	FILE *pipe;
	char buf[BUFSIZE];
	char modname1[BUFSIZE];
	char modname2[BUFSIZE];
	char *name;
	int use_module, crashbuiltin;
	struct stat stat1, stat2;
	struct utsname utsname;

	if (!(pc->flags & PROC_KCORE))
		pc->flags |= DEVMEM;

	/* Already chosen (e.g. on the command line): just report it. */
	if (pc->live_memsrc)
		goto live_report;

	pc->live_memsrc = "/dev/mem";
	use_module = crashbuiltin = FALSE;

	/* If the requested device is really /dev/mem, use /dev/mem. */
	if (file_exists("/dev/mem", &stat1) &&
	    file_exists(pc->memory_device, &stat2) &&
	    S_ISCHR(stat1.st_mode) && S_ISCHR(stat2.st_mode) &&
	    (stat1.st_rdev == stat2.st_rdev)) {
		if (!STREQ(pc->memory_device, "/dev/mem"))
			error(INFO, "%s: same device as /dev/mem\n%s",
				pc->memory_device,
				pc->memory_module ? "" : "\n");
		if (pc->memory_module)
			error(INFO, "ignoring --memory_module %s request\n\n",
				pc->memory_module);
	} else if (pc->memory_module && memory_driver_module_loaded(NULL)) {
		/* A user-specified module is already loaded: use it. */
		error(INFO, "using pre-loaded \"%s\" module\n\n",
			pc->memory_module);
		pc->flags |= MODPRELOAD;
		use_module = TRUE;
	} else {
		/* Probe whether the default crash driver module exists,
		   either via "modprobe -l" or, where modprobe no longer
		   supports -l, by checking the module path directly. */
		pc->memory_module = MEMORY_DRIVER_MODULE;

		if ((pipe = popen(MODPROBE_CMD, "r")) == NULL) {
			error(INFO, "%s: %s\n",
				MODPROBE_CMD, strerror(errno));
			return;
		}

		sprintf(modname1, "%s.o", pc->memory_module);
		sprintf(modname2, "%s.ko", pc->memory_module);
		while (fgets(buf, BUFSIZE, pipe)) {
			if (strstr(buf, "invalid option") &&
			    (uname(&utsname) == 0)) {
				/* modprobe rejected -l: look for the .ko
				   (or .ko.xz) under /lib/modules. */
				sprintf(buf,
				    "/lib/modules/%s/kernel/drivers/char/%s",
					utsname.release, modname2);
				if (file_exists(buf, &stat1))
					use_module = TRUE;
				else {
					strcat(buf, ".xz");
					if (file_exists(buf, &stat1))
						use_module = TRUE;
				}
				break;
			}
			name = basename(strip_linefeeds(buf));
			if (STREQ(name, modname1) || STREQ(name, modname2)) {
				use_module = TRUE;
				break;
			}
		}
		pclose(pipe);

		/* No module available: fall back to a built-in /dev/crash
		   character device if one exists. */
		if (!use_module && file_exists("/dev/crash", &stat1) &&
		    S_ISCHR(stat1.st_mode))
			crashbuiltin = TRUE;
	}

	if (use_module) {
		pc->flags &= ~DEVMEM;
		pc->flags |= MEMMOD;
		pc->readmem = read_memory_device;
		pc->writemem = write_memory_device;
		pc->live_memsrc = pc->memory_device;
	}

	if (crashbuiltin) {
		pc->flags &= ~DEVMEM;
		pc->flags |= CRASHBUILTIN;
		pc->readmem = read_memory_device;
		pc->writemem = write_memory_device;
		pc->live_memsrc = pc->memory_device;
		pc->memory_module = NULL;
	}

live_report:
	if (CRASHDEBUG(1))
		fprintf(fp, "get_live_memory_source: %s\n", pc->live_memsrc);
}

/*
 * Read /proc/modules to determine whether the crash driver module
 * has been loaded.  If count is non-NULL, it receives the module's
 * reference count (third /proc/modules field).
 */
static int
memory_driver_module_loaded(int *count)
{
	FILE *modules;
	int argcnt, module_loaded;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];

	if ((modules = fopen("/proc/modules", "r")) == NULL) {
		error(INFO, "/proc/modules: %s\n", strerror(errno));
		return FALSE;
	}

	module_loaded = FALSE;

	while (fgets(buf, BUFSIZE, modules)) {
		console("%s", buf);
		argcnt = parse_line(buf, arglist);
		if (argcnt < 3)
			continue;
		if (STREQ(arglist[0], pc->memory_module)) {
			module_loaded = TRUE;
			if (CRASHDEBUG(1))
				fprintf(stderr,
				    "\"%s\" module loaded: [%s][%s][%s]\n",
					arglist[0], arglist[0], arglist[1],
					arglist[2]);
			if (count)
				*count = atoi(arglist[2]);
			break;
		}
	}
	fclose(modules);

	return module_loaded;
}

/*
 * Insmod the memory driver module via modprobe, then verify that it
 * actually showed up in /proc/modules.
 */
static int
insmod_memory_driver_module(void)
{
	FILE *pipe;
	char buf[BUFSIZE];
	char command[BUFSIZE];

	sprintf(command, "/sbin/modprobe %s", pc->memory_module);
	if (CRASHDEBUG(1))
		fprintf(fp, "%s\n", command);

	if ((pipe = popen(command, "r")) == NULL) {
		error(INFO, "%s: %s", command, strerror(errno));
		return FALSE;
	}

	/* Echo any modprobe output to the user. */
	while (fgets(buf, BUFSIZE, pipe))
		fprintf(fp, "%s\n", buf);
	pclose(pipe);

	if (!memory_driver_module_loaded(NULL)) {
		error(INFO, "cannot insmod \"%s\" module\n",
			pc->memory_module);
		return FALSE;
	}

	return TRUE;
}

/*
 * Return the dev_t for the memory device driver.  The major number will
 * be that of the kernel's misc driver; the minor is dynamically created
 * when the module is loaded at insmod time, and found in /proc/misc.
*/
static int
get_memory_driver_dev(dev_t *devp)
{
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int argcnt;
	FILE *misc;
	int minor;
	dev_t dev;

	dev = 0;

	/* /proc/misc lines are "<minor> <name>": find our module's entry
	   and combine its minor with the misc driver's major. */
	if ((misc = fopen("/proc/misc", "r")) == NULL) {
		error(INFO, "/proc/misc: %s", strerror(errno));
	} else {
		while (fgets(buf, BUFSIZE, misc)) {
			argcnt = parse_line(buf, arglist);
			if ((argcnt == 2) &&
			    STREQ(arglist[1], pc->memory_module)) {
				minor = atoi(arglist[0]);
				dev = makedev(MISC_MAJOR, minor);
				if (CRASHDEBUG(1))
					fprintf(fp,
					    "/proc/misc: %s %s => %d/%d\n",
						arglist[0], arglist[1],
						major(dev), minor(dev));
				break;
			}
		}
		fclose(misc);
	}

	if (!dev) {
		error(INFO, "cannot determine minor number of %s driver\n",
			pc->memory_module);
		return FALSE;
	}

	*devp = dev;
	return TRUE;
}

/*
 * Deal with the creation or verification of the memory device file:
 *
 *  1. If the device exists, and has the correct major/minor device numbers,
 *     nothing needs to be done.
 *  2. If the filename exists, but it's not a device file, has the wrong
 *     major/minor device numbers, or the wrong permissions, advise the
 *     user to delete it.
 *  3. Otherwise, create it.
 */
static int
create_memory_device(dev_t dev)
{
	struct stat stat;

	if (file_exists(pc->live_memsrc, &stat)) {
		/*
		 * It already exists -- just use it.
		 */
		if ((stat.st_mode == MEMORY_DRIVER_DEVICE_MODE) &&
		    (stat.st_rdev == dev))
			return TRUE;

		/*
		 * Either it's not a device special file, or it's got
		 * the wrong major/minor numbers, or the wrong permissions.
		 * Unlink the file -- it shouldn't be there.
		 */
		if (!S_ISCHR(stat.st_mode))
			error(FATAL,
			    "%s: not a character device -- please delete it!\n",
				pc->live_memsrc);
		else if (dev != stat.st_rdev)
			error(FATAL,
			    "%s: invalid device: %d/%d -- please delete it!\n",
				pc->live_memsrc,
				major(stat.st_rdev), minor(stat.st_rdev));
		else
			unlink(pc->live_memsrc);
	}

	/*
	 * Either it doesn't exist or it was just unlinked.
	 * In either case, try to create it.
	 */
	if (mknod(pc->live_memsrc, MEMORY_DRIVER_DEVICE_MODE, dev)) {
		error(INFO, "%s: mknod: %s\n",
			pc->live_memsrc, strerror(errno));
		return FALSE;
	}

	return TRUE;
}

/*
 * If we're here, the memory driver module is being requested:
 *
 *  1. If /dev/crash is built into the kernel, just open it.
 *  2. If the module is not already loaded, insmod it.
 *  3. Determine the misc driver minor device number that it was assigned.
 *  4. Create (or verify) the device file.
 *  5. Then just open it.
 */
static int
memory_driver_init(void)
{
	dev_t dev;

	if (pc->flags & CRASHBUILTIN)
		goto open_device;

	if (!memory_driver_module_loaded(NULL)) {
		if (!insmod_memory_driver_module())
			return FALSE;
	} else
		/* Pre-loaded by the user: remember not to rmmod it later. */
		pc->flags |= MODPRELOAD;

	if (!get_memory_driver_dev(&dev))
		return FALSE;

	if (!create_memory_device(dev))
		return FALSE;

open_device:
	if ((pc->mfd = open(pc->memory_device, O_RDONLY)) < 0) {
		error(INFO, "%s: open: %s\n",
			pc->memory_device, strerror(errno));
		return FALSE;
	}

	return TRUE;
}

/*
 * Remove the memory driver module and associated file.
 * Skipped entirely for --kernel_debug_query runs; the module is only
 * rmmod'd if we loaded it ourselves and its refcount is zero.
 */
int
cleanup_memory_driver(void)
{
	int errors, count;
	char command[BUFSIZE];

	count = errors = 0;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return TRUE;

	close(pc->mfd);

	if (file_exists(pc->memory_device, NULL) &&
	    unlink(pc->memory_device)) {
		error(INFO, "%s: %s\n",
			pc->memory_device, strerror(errno));
		errors++;
	}

	if (!(pc->flags & MODPRELOAD) &&
	    memory_driver_module_loaded(&count) && !count) {
		sprintf(command, "/sbin/rmmod %s", pc->memory_module);
		if (CRASHDEBUG(1))
			fprintf(fp, "%s\n", command);
		errors += system(command);
	}

	if (errors)
		error(NOTE, "cleanup_memory_driver failed\n");

	return errors ? FALSE : TRUE;
}

/*
 * Use the kernel's radix_tree_lookup() function as a template to dump
 * a radix tree's entries.
*/ ulong RADIX_TREE_MAP_SHIFT = UNINITIALIZED; ulong RADIX_TREE_MAP_SIZE = UNINITIALIZED; ulong RADIX_TREE_MAP_MASK = UNINITIALIZED; /* * do_radix_tree argument usage: * * root: Address of a radix_tree_root structure * * flag: RADIX_TREE_COUNT - Return the number of entries in the tree. * RADIX_TREE_SEARCH - Search for an entry at rtp->index; if found, * store the entry in rtp->value and return a count of 1; otherwise * return a count of 0. * RADIX_TREE_DUMP - Dump all existing index/value pairs. * RADIX_TREE_GATHER - Store all existing index/value pairs in the * passed-in array of radix_tree_pair structs starting at rtp, * returning the count of entries stored; the caller can/should * limit the number of returned entries by putting the array size * (max count) in the rtp->index field of the first structure * in the passed-in array. * RADIX_TREE_DUMP_CB - Similar with RADIX_TREE_DUMP, but for each * radix tree entry, a user defined callback at rtp->value will * be invoked. * * rtp: Unused by RADIX_TREE_COUNT and RADIX_TREE_DUMP. * A pointer to a radix_tree_pair structure for RADIX_TREE_SEARCH. * A pointer to an array of radix_tree_pair structures for * RADIX_TREE_GATHER; the dimension (max count) of the array may * be stored in the index field of the first structure to avoid * any chance of an overrun. * For RADIX_TREE_DUMP_CB, the rtp->value must be initialized as a * callback function. 
The callback prototype must be: int (*)(ulong); */ ulong do_radix_tree(ulong root, int flag, struct radix_tree_pair *rtp) { int i, ilen, height; long nlen; ulong index, maxindex, count, maxcount; long *height_to_maxindex; char *radix_tree_root_buf; struct radix_tree_pair *r; ulong root_rnode; void *ret; int (*cb)(ulong) = NULL; count = 0; if (!VALID_STRUCT(radix_tree_root) || !VALID_STRUCT(radix_tree_node) || !VALID_MEMBER(radix_tree_root_height) || !VALID_MEMBER(radix_tree_root_rnode) || !VALID_MEMBER(radix_tree_node_slots) || !ARRAY_LENGTH(height_to_maxindex)) error(FATAL, "radix trees do not exist (or have changed their format)\n"); if (RADIX_TREE_MAP_SHIFT == UNINITIALIZED) { if (!(nlen = MEMBER_SIZE("radix_tree_node", "slots"))) error(FATAL, "cannot determine length of " "radix_tree_node.slots[] array\n"); nlen /= sizeof(void *); RADIX_TREE_MAP_SHIFT = ffsl(nlen) - 1; RADIX_TREE_MAP_SIZE = (1UL << RADIX_TREE_MAP_SHIFT); RADIX_TREE_MAP_MASK = (RADIX_TREE_MAP_SIZE-1); } ilen = ARRAY_LENGTH(height_to_maxindex); height_to_maxindex = (long *)GETBUF(ilen * sizeof(long)); readmem(symbol_value("height_to_maxindex"), KVADDR, height_to_maxindex, ilen*sizeof(long), "height_to_maxindex array", FAULT_ON_ERROR); if (CRASHDEBUG(1)) { fprintf(fp, "radix_tree_node.slots[%ld]\n", RADIX_TREE_MAP_SIZE); fprintf(fp, "height_to_maxindex[%d]: ", ilen); for (i = 0; i < ilen; i++) fprintf(fp, "%lu ", height_to_maxindex[i]); fprintf(fp, "\n"); fprintf(fp, "radix_tree_root at %lx:\n", root); dump_struct("radix_tree_root", (ulong)root, RADIX(16)); } radix_tree_root_buf = GETBUF(SIZE(radix_tree_root)); readmem(root, KVADDR, radix_tree_root_buf, SIZE(radix_tree_root), "radix_tree_root", FAULT_ON_ERROR); height = UINT(radix_tree_root_buf + OFFSET(radix_tree_root_height)); if ((height < 0) || (height > ilen)) { error(INFO, "height_to_maxindex[] index: %ld\n", ilen); fprintf(fp, "invalid height in radix_tree_root at %lx:\n", root); dump_struct("radix_tree_root", (ulong)root, RADIX(16)); 
return 0; } maxindex = height_to_maxindex[height]; FREEBUF(height_to_maxindex); FREEBUF(radix_tree_root_buf); root_rnode = root + OFFSET(radix_tree_root_rnode); switch (flag) { case RADIX_TREE_COUNT: for (index = count = 0; index <= maxindex; index++) { if (radix_tree_lookup(root_rnode, index, height)) count++; } break; case RADIX_TREE_SEARCH: count = 0; if (rtp->index > maxindex) break; if ((ret = radix_tree_lookup(root_rnode, rtp->index, height))) { rtp->value = ret; count++; } break; case RADIX_TREE_DUMP: for (index = count = 0; index <= maxindex; index++) { if ((ret = radix_tree_lookup(root_rnode, index, height))) { fprintf(fp, "[%ld] %lx\n", index, (ulong)ret); count++; } } break; case RADIX_TREE_GATHER: if (!(maxcount = rtp->index)) maxcount = (ulong)(-1); /* caller beware */ for (index = count = 0, r = rtp; index <= maxindex; index++) { if ((ret = radix_tree_lookup(root_rnode, index, height))) { r->index = index; r->value = ret; count++; if (--maxcount <= 0) break; r++; } } break; case RADIX_TREE_DUMP_CB: if (rtp->value == NULL) { error(FATAL, "do_radix_tree: need set callback function"); return -EINVAL; } cb = (int (*)(ulong))rtp->value; for (index = count = 0; index <= maxindex; index++) { if ((ret = radix_tree_lookup(root_rnode, index, height))) { /* Caller defined operation */ if (!cb((ulong)ret)) { error(FATAL, "do_radix_tree: callback " "operation failed: entry: %ld item: %lx\n", count, (ulong)ret); } count++; } } break; default: error(FATAL, "do_radix_tree: invalid flag: %lx\n", flag); } return count; } static void * radix_tree_lookup(ulong root_rnode, ulong index, int height) { unsigned int shift; ulong rnode; ulong *slots; shift = (height-1) * RADIX_TREE_MAP_SHIFT; readmem(root_rnode, KVADDR, &rnode, sizeof(void *), "radix_tree_root rnode", FAULT_ON_ERROR); if (rnode & 1) rnode &= ~1; slots = (ulong *)GETBUF(sizeof(void *) * RADIX_TREE_MAP_SIZE); while (height > 0) { if (rnode == 0) break; readmem((ulong)rnode+OFFSET(radix_tree_node_slots), KVADDR, 
&slots[0], sizeof(void *) * RADIX_TREE_MAP_SIZE, "radix_tree_node.slots array", FAULT_ON_ERROR); rnode = slots[((index >> shift) & RADIX_TREE_MAP_MASK)]; shift -= RADIX_TREE_MAP_SHIFT; height--; } FREEBUF(slots); return (void *)rnode; } int is_readable(char *filename) { int fd; if ((fd = open(filename, O_RDONLY)) < 0) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } else close(fd); return TRUE; } static int match_file_string(char *filename, char *string, char *buffer) { int found; char command[BUFSIZE]; FILE *pipe; sprintf(command, "/usr/bin/strings %s", filename); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } found = FALSE; while (fgets(buffer, BUFSIZE-1, pipe)) { if (strstr(buffer, string)) { found = TRUE; break; } } pclose(pipe); return found; } char * vfsmount_devname(ulong vfsmnt, char *buf, int maxlen) { ulong devp; BZERO(buf, maxlen); if (VALID_STRUCT(mount)) { if (!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_devname), KVADDR, &devp, sizeof(void *), "mount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_devname), KVADDR, &devp, sizeof(void *), "vfsmount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } if (read_string(devp, buf, BUFSIZE-1)) return buf; return buf; } static ulong get_root_vfsmount(char *file_buf) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; ulong vfsmnt; ulong mnt_parent; vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); if (!strlen(vfsmount_devname(vfsmnt, buf1, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") || STREQ(buf1, "devtmpfs")) { if (VALID_STRUCT(mount)) { if (!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "mount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "vfsmount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } if 
(!strlen(vfsmount_devname(mnt_parent, buf2, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") && STREQ(buf2, "udev")) return mnt_parent; if (STREQ(buf1, "devtmpfs") && STREQ(buf2, "devtmpfs")) return mnt_parent; } return vfsmnt; } void check_live_arch_mismatch(void) { struct utsname utsname; if (machine_type("X86") && (uname(&utsname) == 0) && STRNEQ(utsname.machine, "x86_64")) error(FATAL, "compiled for the X86 architecture\n"); #if defined(__i386__) || defined(__x86_64__) if (machine_type("ARM")) error(FATAL, "compiled for the ARM architecture\n"); #endif #ifdef __x86_64__ if (machine_type("ARM64")) error(FATAL, "compiled for the ARM64 architecture\n"); #endif #ifdef __x86_64__ if (machine_type("PPC64")) error(FATAL, "compiled for the PPC64 architecture\n"); #endif #ifdef __powerpc64__ if (machine_type("PPC")) error(FATAL, "compiled for the PPC architecture\n"); #endif } crash-7.1.4/Makefile0000644000000000000000000006040512634305150012765 0ustar rootroot# Makefile for core analysis suite # # Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. # www.missioncriticallinux.com, info@missioncriticallinux.com # # Copyright (C) 2002-2013 David Anderson # Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# PROGRAM=crash # # Supported targets: X86 ALPHA PPC IA64 PPC64 # TARGET and GDB_CONF_FLAGS will be configured automatically by configure # TARGET= GDB_CONF_FLAGS= ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/) ifeq (${ARCH}, ppc64) CONF_FLAGS = -m64 endif # # GDB, GDB_FILES, GDB_OFILES and GDB_PATCH_FILES will be configured automatically by configure # GDB=gdb-7.6 GDB_FILES=${GDB_7.6_FILES} GDB_OFILES= GDB_PATCH_FILES=gdb-7.6.patch gdb-7.6-ppc64le-support.patch # # Default installation directory # INSTALLDIR=${DESTDIR}/usr/bin # LDFLAGS will be configured automatically by configure LDFLAGS= GENERIC_HFILES=defs.h xen_hyper_defs.h xen_dom0.h MCORE_HFILES=va_server.h vas_crash.h REDHAT_HFILES=netdump.h diskdump.h makedumpfile.h xendump.h kvmdump.h qemu-load.h LKCD_DUMP_HFILES=lkcd_vmdump_v1.h lkcd_vmdump_v2_v3.h lkcd_dump_v5.h \ lkcd_dump_v7.h lkcd_dump_v8.h LKCD_OBSOLETE_HFILES=lkcd_fix_mem.h LKCD_TRACE_HFILES=lkcd_x86_trace.h IBM_HFILES=ibm_common.h SADUMP_HFILES=sadump.h UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h VMWARE_HFILES=vmware_vmss.h CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \ kernel.c test.c gdb_interface.c configure.c net.c dev.c \ alpha.c x86.c ppc.c ia64.c s390.c s390x.c s390dbf.c ppc64.c x86_64.c \ arm.c arm64.c mips.c \ extensions.c remote.c va_server.c va_server_v1.c symbols.c cmdline.c \ lkcd_common.c lkcd_v1.c lkcd_v2_v3.c lkcd_v5.c lkcd_v7.c lkcd_v8.c\ lkcd_fix_mem.c s390_dump.c lkcd_x86_trace.c \ netdump.c diskdump.c makedumpfile.c xendump.c unwind.c unwind_decoder.c \ unwind_x86_32_64.c unwind_arm.c \ xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \ xen_hyper_dump_tables.c kvmdump.c qemu.c qemu-load.c sadump.c ipcs.c \ ramdump.c vmware_vmss.c \ xen_dom0.c SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \ ${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \ ${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\ 
${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES} OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \ build_data.o kernel.o test.o gdb_interface.o net.o dev.o \ alpha.o x86.o ppc.o ia64.o s390.o s390x.o s390dbf.o ppc64.o x86_64.o \ arm.o arm64.o mips.o \ extensions.o remote.o va_server.o va_server_v1.o symbols.o cmdline.o \ lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o makedumpfile.o xendump.o \ lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o \ unwind_x86_32_64.o unwind_arm.o \ xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \ xen_hyper_dump_tables.o kvmdump.o qemu.o qemu-load.o sadump.o ipcs.o \ ramdump.o vmware_vmss.o \ xen_dom0.o MEMORY_DRIVER_FILES=memory_driver/Makefile memory_driver/crash.c memory_driver/README # These are the current set of crash extensions sources. They are not built # by default unless the third command line of the "all:" stanza is uncommented. # Alternatively, they can be built by entering "make extensions" from this # directory. 
EXTENSIONS=extensions EXTENSION_SOURCE_FILES=${EXTENSIONS}/Makefile ${EXTENSIONS}/echo.c ${EXTENSIONS}/dminfo.c \ ${EXTENSIONS}/snap.c ${EXTENSIONS}/snap.mk ${EXTENSIONS}/trace.c \ ${EXTENSIONS}/eppic.c ${EXTENSIONS}/eppic.mk DAEMON_OBJECT_FILES=remote_daemon.o va_server.o va_server_v1.o \ lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ s390_dump.o netdump_daemon.o GDB_5.0_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/gnu-regex.c \ ${GDB}/gdb/ppc-linux-nat.c GDB_5.0_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/gnu-regex.o \ ${GDB}/gdb/ppc-linux-nat.o GDB_5.1_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/gnu-regex.c GDB_5.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/gnu-regex.o GDB_5.2.1_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c GDB_5.2.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o GDB_5.3post-0.20021129.36rh_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ 
${GDB}/gdb/frame.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c GDB_5.3post-0.20021129.36rh_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/frame.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o ${GDB}/gdb/ui-file.o \ ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o GDB_6.0_FILES=${GDB}/gdb/Makefile.in ${GDB}/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c \ ${GDB}/gdb/ppc-linux-tdep.c ${GDB}/sim/ppc/ppc-instructions \ ${GDB}/bfd/simple.c ${GDB}/include/obstack.h GDB_6.0_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o \ ${GDB}/gdb/ppc-linux-tdep.o ${GDB}/bfd/simple.o GDB_6.1_FILES=${GDB}/gdb/Makefile.in ${GDB}/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c \ ${GDB}/include/obstack.h ${GDB}/gdb/ppc-linux-tdep.c GDB_6.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o \ ${GDB}/gdb/ppc-linux-tdep.o GDB_7.0_FILES= GDB_7.0_OFILES=${GDB}/gdb/symtab.o GDB_7.3.1_FILES= GDB_7.3.1_OFILES=${GDB}/gdb/symtab.o GDB_7.6_FILES= GDB_7.6_OFILES=${GDB}/gdb/symtab.o # # GDB_FLAGS is passed up from the gdb Makefile. # GDB_FLAGS= # # WARNING_OPTIONS and WARNING_ERROR are both applied on a per-file basis. # WARNING_ERROR is NOT used on files including "dirty" gdb headers so that # successful compilations can be achieved with acceptable warnings; its # usefulness is also dependent upon the processor's compiler -- your mileage # may vary. 
# #WARNING_OPTIONS=-Wall -O2 -Wstrict-prototypes -Wmissing-prototypes -fstack-protector -Wformat-security #WARNING_ERROR=-Werror # TARGET_CFLAGS will be configured automatically by configure TARGET_CFLAGS= CRASH_CFLAGS=-g -D${TARGET} ${TARGET_CFLAGS} ${GDB_FLAGS} ${CFLAGS} GPL_FILES=COPYING3 TAR_FILES=${SOURCE_FILES} Makefile ${GPL_FILES} README .rh_rpm_package crash.8 \ ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} CSCOPE_FILES=${SOURCE_FILES} READLINE_DIRECTORY=./${GDB}/readline BFD_DIRECTORY=./${GDB}/bfd GDB_INCLUDE_DIRECTORY=./${GDB}/include REDHATFLAGS=-DREDHAT # target could be set on command line when invoking make. Like: make target=ARM # otherwise target will be the same as the host ifneq ($(target),) CONF_TARGET_FLAG="-t$(target)" endif # To build the extensions library by default, uncomment the third command # line below. Otherwise they can be built by entering "make extensions". all: make_configure @./configure ${CONF_TARGET_FLAG} -p "RPMPKG=${RPMPKG}" -b @make --no-print-directory gdb_merge # @make --no-print-directory extensions gdb_merge: force @if [ ! -f ${GDB}/README ]; then \ make --no-print-directory gdb_unzip; fi @echo "${LDFLAGS} -lz -ldl -rdynamic" > ${GDB}/gdb/mergelibs @echo "../../${PROGRAM} ../../${PROGRAM}lib.a" > ${GDB}/gdb/mergeobj @rm -f ${PROGRAM} @if [ ! -f ${GDB}/config.status ]; then \ (cd ${GDB}; ./configure ${GDB_CONF_FLAGS} --with-separate-debug-dir=/usr/lib/debug \ --with-bugurl="" --with-expat=no --with-python=no; \ make --no-print-directory CRASH_TARGET=${TARGET}; echo ${TARGET} > crash.target) \ else make --no-print-directory rebuild; fi @if [ ! -f ${PROGRAM} ]; then \ echo; echo "${PROGRAM} build failed"; \ echo; exit 1; fi rebuild: @if [ ! 
-f ${GDB}/${GDB}.patch ]; then \ touch ${GDB}/${GDB}.patch; fi @if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ] && \ [ "`sum ${GDB}.patch`" != "`sum ${GDB}/${GDB}.patch`" ]; then \ (patch -N -p0 -r- < ${GDB}.patch; cp ${GDB}.patch ${GDB}; cd ${GDB}; \ make --no-print-directory CRASH_TARGET=${TARGET}) \ else (cd ${GDB}/gdb; make --no-print-directory CRASH_TARGET=${TARGET}); fi gdb_unzip: @rm -f gdb.files @for FILE in ${GDB_FILES} dummy; do\ echo $$FILE >> gdb.files; done @if [ ! -f ${GDB}.tar.gz ] && [ ! -f /usr/bin/wget ]; then \ echo /usr/bin/wget is required to download ${GDB}.tar.gz; echo; exit 1; fi @if [ ! -f ${GDB}.tar.gz ] && [ -f /usr/bin/wget ]; then \ wget http://ftp.gnu.org/gnu/gdb/${GDB}.tar.gz; fi @tar --exclude-from gdb.files -xvzmf ${GDB}.tar.gz @make --no-print-directory gdb_patch gdb_patch: if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ]; then \ patch -p0 < ${GDB}.patch; cp ${GDB}.patch ${GDB}; fi if [ "${ARCH}" = "ppc64le" ] && [ -f ${GDB}-ppc64le-support.patch ]; then \ patch -d ${GDB} -p1 -F0 < ${GDB}-ppc64le-support.patch ; \ fi if [ "${ARCH}" = "x86_64" ] && [ "${TARGET}" = "PPC64" ] && [ -f ${GDB}-ppc64le-support.patch ]; then \ patch -d ${GDB} -p1 -F0 < ${GDB}-ppc64le-support.patch ; \ fi library: make_build_data ${OBJECT_FILES} ar -rs ${PROGRAM}lib.a ${OBJECT_FILES} gdb: force rm -f ${GDB_OFILES} @make --no-print-directory all force: make_configure: force @rm -f configure @${CC} ${CONF_FLAGS} -o configure configure.c ${WARNING_ERROR} ${WARNING_OPTIONS} clean: make_configure @./configure ${CONF_TARGET_FLAG} -q -b @make --no-print-directory do_clean do_clean: rm -f ${OBJECT_FILES} ${DAEMON_OBJECT_FILES} ${PROGRAM} ${PROGRAM}lib.a ${GDB_OFILES} @(cd extensions; make --no-print-directory -i clean) @(cd memory_driver; make --no-print-directory -i clean) make_build_data: force ${CC} -c ${CRASH_CFLAGS} build_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} install: /usr/bin/install ${PROGRAM} ${INSTALLDIR} # /usr/bin/install ${PROGRAM}d ${INSTALLDIR} 
unconfig: make_configure @./configure -u warn: make_configure @./configure ${CONF_TARGET_FLAG} -w -b @make --no-print-directory gdb_merge Warn: make_configure @./configure ${CONF_TARGET_FLAG} -W -b @make --no-print-directory gdb_merge nowarn: make_configure @./configure ${CONF_TARGET_FLAG} -n -b @make --no-print-directory gdb_merge lzo: make_configure @./configure -x lzo ${CONF_TARGET_FLAG} -w -b @make --no-print-directory gdb_merge snappy: make_configure @./configure -x snappy ${CONF_TARGET_FLAG} -w -b @make --no-print-directory gdb_merge main.o: ${GENERIC_HFILES} main.c ${CC} -c ${CRASH_CFLAGS} main.c ${WARNING_OPTIONS} ${WARNING_ERROR} cmdline.o: ${GENERIC_HFILES} cmdline.c ${CC} -c ${CRASH_CFLAGS} cmdline.c -I${READLINE_DIRECTORY} ${WARNING_OPTIONS} ${WARNING_ERROR} tools.o: ${GENERIC_HFILES} tools.c ${CC} -c ${CRASH_CFLAGS} tools.c ${WARNING_OPTIONS} ${WARNING_ERROR} global_data.o: ${GENERIC_HFILES} global_data.c ${CC} -c ${CRASH_CFLAGS} global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} symbols.o: ${GENERIC_HFILES} symbols.c ${CC} -c ${CRASH_CFLAGS} symbols.c -I${BFD_DIRECTORY} -I${GDB_INCLUDE_DIRECTORY} ${WARNING_OPTIONS} ${WARNING_ERROR} filesys.o: ${GENERIC_HFILES} filesys.c ${CC} -c ${CRASH_CFLAGS} filesys.c ${WARNING_OPTIONS} ${WARNING_ERROR} help.o: ${GENERIC_HFILES} help.c ${CC} -c ${CRASH_CFLAGS} help.c ${WARNING_OPTIONS} ${WARNING_ERROR} memory.o: ${GENERIC_HFILES} memory.c ${CC} -c ${CRASH_CFLAGS} memory.c ${WARNING_OPTIONS} ${WARNING_ERROR} test.o: ${GENERIC_HFILES} test.c ${CC} -c ${CRASH_CFLAGS} test.c ${WARNING_OPTIONS} ${WARNING_ERROR} task.o: ${GENERIC_HFILES} task.c ${CC} -c ${CRASH_CFLAGS} task.c ${WARNING_OPTIONS} ${WARNING_ERROR} kernel.o: ${GENERIC_HFILES} kernel.c ${CC} -c ${CRASH_CFLAGS} kernel.c ${WARNING_OPTIONS} ${WARNING_ERROR} gdb_interface.o: ${GENERIC_HFILES} gdb_interface.c ${CC} -c ${CRASH_CFLAGS} gdb_interface.c ${WARNING_OPTIONS} ${WARNING_ERROR} va_server.o: ${MCORE_HFILES} va_server.c ${CC} -c ${CRASH_CFLAGS} va_server.c 
${WARNING_OPTIONS} ${WARNING_ERROR} va_server_v1.o: ${MCORE_HFILES} va_server_v1.c ${CC} -c ${CRASH_CFLAGS} va_server_v1.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_common.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_common.c ${CC} -c ${CRASH_CFLAGS} lkcd_common.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v1.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v1.c ${CC} -c ${CRASH_CFLAGS} lkcd_v1.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v2_v3.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v2_v3.c ${CC} -c ${CRASH_CFLAGS} lkcd_v2_v3.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v5.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v5.c ${CC} -c ${CRASH_CFLAGS} lkcd_v5.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v7.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v7.c ${CC} -c ${CRASH_CFLAGS} lkcd_v7.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v8.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v8.c ${CC} -c ${CRASH_CFLAGS} lkcd_v8.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} net.o: ${GENERIC_HFILES} net.c ${CC} -c ${CRASH_CFLAGS} net.c ${WARNING_OPTIONS} ${WARNING_ERROR} dev.o: ${GENERIC_HFILES} dev.c ${CC} -c ${CRASH_CFLAGS} dev.c ${WARNING_OPTIONS} ${WARNING_ERROR} # remote.c functionality has been deprecated remote.o: ${GENERIC_HFILES} remote.c @${CC} -c ${CRASH_CFLAGS} remote.c ${WARNING_OPTIONS} ${WARNING_ERROR} remote_daemon.o: ${GENERIC_HFILES} remote.c ${CC} -c ${CRASH_CFLAGS} -DDAEMON remote.c -o remote_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} x86.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86.c ${CC} -c ${CRASH_CFLAGS} x86.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} alpha.o: ${GENERIC_HFILES} alpha.c ${CC} -c ${CRASH_CFLAGS} alpha.c ${WARNING_OPTIONS} ${WARNING_ERROR} ppc.o: ${GENERIC_HFILES} ppc.c ${CC} -c ${CRASH_CFLAGS} ppc.c ${WARNING_OPTIONS} ${WARNING_ERROR} ia64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ia64.c ${CC} -c ${CRASH_CFLAGS} ia64.c ${WARNING_OPTIONS} ${WARNING_ERROR} ppc64.o: ${GENERIC_HFILES} ppc64.c ${CC} -c ${CRASH_CFLAGS} ppc64.c 
${WARNING_OPTIONS} ${WARNING_ERROR} x86_64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86_64.c ${CC} -c ${CRASH_CFLAGS} x86_64.c ${WARNING_OPTIONS} ${WARNING_ERROR} arm.o: ${GENERIC_HFILES} ${REDHAT_HFILES} arm.c ${CC} -c ${CRASH_CFLAGS} arm.c ${WARNING_OPTIONS} ${WARNING_ERROR} arm64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} arm64.c ${CC} -c ${CRASH_CFLAGS} arm64.c ${WARNING_OPTIONS} ${WARNING_ERROR} mips.o: ${GENERIC_HFILES} ${REDHAT_HFILES} mips.c ${CC} -c ${CRASH_CFLAGS} mips.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390.o: ${GENERIC_HFILES} ${IBM_HFILES} s390.c ${CC} -c ${CRASH_CFLAGS} s390.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390x.o: ${GENERIC_HFILES} ${IBM_HFILES} s390x.c ${CC} -c ${CRASH_CFLAGS} s390x.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390dbf.o: ${GENERIC_HFILES} ${IBM_HFILES} s390dbf.c ${CC} -c ${CRASH_CFLAGS} s390dbf.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390_dump.o: ${GENERIC_HFILES} ${IBM_HFILES} s390_dump.c ${CC} -c ${CRASH_CFLAGS} s390_dump.c ${WARNING_OPTIONS} ${WARNING_ERROR} netdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ${SADUMP_HFILES} netdump.c ${CC} -c ${CRASH_CFLAGS} netdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} netdump_daemon.o: ${GENERIC_HFILES} ${REDHAT_HFILES} netdump.c ${CC} -c ${CRASH_CFLAGS} -DDAEMON netdump.c -o netdump_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} diskdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} diskdump.c ${CC} -c ${CRASH_CFLAGS} diskdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} makedumpfile.o: ${GENERIC_HFILES} ${REDHAT_HFILES} makedumpfile.c ${CC} -c ${CRASH_CFLAGS} makedumpfile.c ${WARNING_OPTIONS} ${WARNING_ERROR} xendump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} xendump.c ${CC} -c ${CRASH_CFLAGS} xendump.c ${WARNING_OPTIONS} ${WARNING_ERROR} kvmdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} kvmdump.c ${CC} -c ${CRASH_CFLAGS} kvmdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} qemu.o: ${GENERIC_HFILES} ${REDHAT_HFILES} qemu.c ${CC} -c ${CRASH_CFLAGS} qemu.c ${WARNING_OPTIONS} ${WARNING_ERROR} qemu-load.o: ${GENERIC_HFILES} ${REDHAT_HFILES} 
qemu-load.c ${CC} -c ${CRASH_CFLAGS} qemu-load.c ${WARNING_OPTIONS} ${WARNING_ERROR} sadump.o: ${GENERIC_HFILES} ${SADUMP_HFILES} sadump.c ${CC} -c ${CRASH_CFLAGS} sadump.c ${WARNING_OPTIONS} ${WARNING_ERROR} ipcs.o: ${GENERIC_HFILES} ipcs.c ${CC} -c ${CRASH_CFLAGS} ipcs.c ${WARNING_OPTIONS} ${WARNING_ERROR} extensions.o: ${GENERIC_HFILES} extensions.c ${CC} -c ${CRASH_CFLAGS} extensions.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_x86_trace.o: ${GENERIC_HFILES} ${LKCD_TRACE_HFILES} lkcd_x86_trace.c ${CC} -c ${CRASH_CFLAGS} lkcd_x86_trace.c -DREDHAT ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_x86_32_64.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_x86_32_64.c ${CC} -c ${CRASH_CFLAGS} unwind_x86_32_64.c -o unwind_x86_32_64.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_arm.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_arm.c ${CC} -c ${CRASH_CFLAGS} unwind_arm.c -o unwind_arm.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v1.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V1 -o unwind_v1.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v2.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V2 -o unwind_v2.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v3.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V3 -o unwind_v3.o ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_fix_mem.o: ${GENERIC_HFILES} ${LKCD_HFILES} lkcd_fix_mem.c ${CC} -c ${CRASH_CFLAGS} lkcd_fix_mem.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper.o: ${GENERIC_HFILES} xen_hyper.c ${CC} -c ${CRASH_CFLAGS} xen_hyper.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper_command.o: ${GENERIC_HFILES} xen_hyper_command.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_command.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper_global_data.o: ${GENERIC_HFILES} xen_hyper_global_data.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_global_data.c ${WARNING_OPTIONS} 
${WARNING_ERROR} xen_hyper_dump_tables.o: ${GENERIC_HFILES} xen_hyper_dump_tables.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_dump_tables.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_dom0.o: ${GENERIC_HFILES} xen_dom0.c ${CC} -c ${CRASH_CFLAGS} xen_dom0.c ${WARNING_OPTIONS} ${WARNING_ERROR} ramdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ramdump.c ${CC} -c ${CRASH_CFLAGS} ramdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} vmware_vmss.o: ${GENERIC_HFILES} ${VMWARE_HFILES} vmware_vmss.c ${CC} -c ${CRASH_CFLAGS} vmware_vmss.c ${WARNING_OPTIONS} ${WARNING_ERROR} ${PROGRAM}: force @make --no-print-directory all # Remote daemon functionality has been deprecated. daemon_deprecated: force @echo "WARNING: remote daemon functionality has been deprecated" @echo ${PROGRAM}d: daemon_deprecated make_configure @./configure -d @make --no-print-directory make_build_data @make --no-print-directory daemon daemon: ${DAEMON_OBJECT_FILES} ${CC} ${LDFLAGS} -o ${PROGRAM}d ${DAEMON_OBJECT_FILES} build_data.o -lz files: make_configure @./configure -q -b @make --no-print-directory show_files gdb_files: make_configure @./configure -q -b @echo ${GDB_FILES} ${GDB_PATCH_FILES} show_files: @if [ -f ${PROGRAM} ]; then \ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi @echo ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} ${GPL_FILES} README \ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} ctags: ctags ${SOURCE_FILES} tar: make_configure @./configure -q -b @make --no-print-directory do_tar do_tar: @if [ -f ${PROGRAM} ]; then \ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ${GDB_PATCH_FILES} @echo; ls -l ${PROGRAM}.tar.gz VERSION=7.1.4 RELEASE=0 release: make_configure @if [ "`id --user`" != "0" ]; then \ echo "make release: must be super-user"; exit 1; fi @./configure -P "RPMPKG=${RPMPKG}" -u -g @make --no-print-directory release_configure @echo @echo "cvs tag this release if necessary" 
release_configure: make_configure @if [ "${GDB}" = "" ] ; then \ echo "make release: GDB not defined: append GDB=gdb-x.x to make command line"; echo; exit 1; fi @./configure -r ${GDB} @make --no-print-directory do_release do_release: @echo "CRASH VERSION: ${VERSION} GDB VERSION: ${GDB}" @if [ ! -f .rh_rpm_package ]; then \ echo "no .rh_rpm_package exists!"; exit 1; fi @chmod 666 .rh_rpm_package @rm -rf ./RELDIR; mkdir ./RELDIR; mkdir ./RELDIR/${PROGRAM}-${VERSION} @rm -f ${PROGRAM}-${VERSION}.tar.gz @rm -f ${PROGRAM}-${VERSION}-${RELEASE}.src.rpm @chown root ./RELDIR/${PROGRAM}-${VERSION} @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} ${GPL_FILES} \ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} | \ (cd ./RELDIR/${PROGRAM}-${VERSION}; tar xf -) @cp ${GDB}.tar.gz ./RELDIR/${PROGRAM}-${VERSION} @./${PROGRAM} --no_scroll --no_crashrc -h README > README @cp README ./RELDIR/${PROGRAM}-${VERSION}/README @(cd ./RELDIR; find . -exec chown root {} ";") @(cd ./RELDIR; find . -exec chgrp root {} ";") @(cd ./RELDIR; find . -exec touch {} ";") @(cd ./RELDIR; \ tar czvf ../${PROGRAM}-${VERSION}.tar.gz ${PROGRAM}-${VERSION}) @chgrp root ${PROGRAM}-${VERSION}.tar.gz @rm -rf ./RELDIR @echo @ls -l ${PROGRAM}-${VERSION}.tar.gz @./configure -s -u > ${PROGRAM}.spec @if [ -s ${PROGRAM}.spec ]; then \ rm -rf ./RPMBUILD; \ mkdir -p ./RPMBUILD/SOURCES ./RPMBUILD/SPECS ./RPMBUILD/SRPMS; \ cp ${PROGRAM}-${VERSION}.tar.gz ./RPMBUILD/SOURCES; \ cp ${PROGRAM}.spec ./RPMBUILD/SPECS; \ rpmbuild --define "_sourcedir ./RPMBUILD/SOURCES" \ --define "_srcrpmdir ./RPMBUILD/SRPMS" \ --define "_specdir ./RPMBUILD/SPECS" \ --nodeps -bs ./RPMBUILD/SPECS/${PROGRAM}.spec > /dev/null; \ mv ./RPMBUILD/SRPMS/${PROGRAM}-${VERSION}-${RELEASE}.src.rpm . 
; \ rm -rf ./RPMBUILD; \ ls -l ${PROGRAM}-${VERSION}-${RELEASE}.src.rpm; \ fi ref: make ctags cscope cscope: rm -f cscope.files cscope_out for FILE in ${SOURCE_FILES}; do \ echo $$FILE >> cscope.files; done cscope glink: make_configure @./configure -q -b rm -f gdb ln -s ${GDB}/gdb gdb (cd ${GDB}/gdb; rm -f ${PROGRAM}; ln -s ../../${PROGRAM} ${PROGRAM}) name: @echo ${PROGRAM} dis: objdump --disassemble --line-numbers ${PROGRAM} > ${PROGRAM}.dis extensions: make_configure @./configure ${CONF_TARGET_FLAG} -q -b @make --no-print-directory do_extensions do_extensions: @(cd extensions; make -i TARGET=$(TARGET) TARGET_CFLAGS="$(TARGET_CFLAGS)" GDB=$(GDB) GDB_FLAGS=$(GDB_FLAGS)) memory_driver: make_configure @(cd memory_driver; make --no-print-directory -i) crash-7.1.4/tools.c0000775000000000000000000043457312634305150012651 0ustar rootroot/* tools.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/

#include "defs.h"
/*
 * NOTE(review): the angle-bracket header name below was lost when this
 * archive was rendered as text (the <...> span was stripped, as were the
 * ones in filesys.c above) -- restore it from the upstream source; TODO
 * confirm which header it was.
 */
#include

/* Forward declarations for this file's static helpers. */
static void print_number(struct number_option *, int, int);
static long alloc_hq_entry(void);
struct hq_entry;
static void dealloc_hq_entry(struct hq_entry *);
static void show_options(void);
static void dump_struct_members(struct list_data *, int, ulong);
static void rbtree_iteration(ulong, struct tree_data *, char *);
static void rdtree_iteration(ulong, struct tree_data *, char *, ulong, uint);
static void dump_struct_members_for_tree(struct tree_data *, int, ulong);

/*
 * General purpose error reporting routine.  Type INFO prints the message
 * and returns.  Type FATAL aborts the command in progress, and longjmps
 * back to the appropriate recovery location.  If a FATAL occurs during
 * program initialization, exit() is called.
 *
 * The idea is to get the message out so that it is seen by the user
 * regardless of how the command output may be piped or redirected.
 * Besides stderr, check whether the output is going to a file or pipe, and
 * if so, intermingle the error message there as well.
 *
 * NOTE(review): the name "__error" uses a reserved-identifier prefix;
 * presumably deliberate so that a wrapper macro can supply "error" --
 * confirm against defs.h.
 */
int
__error(int type, char *fmt, ...)
{
	int end_of_line, new_line;
	char buf[BUFSIZE];
	char *spacebuf;
	void *retaddr[NUMBER_STACKFRAMES] = { 0 };
	va_list ap;

	/*
	 * With debug enabled (or DROP_CORE set), capture the caller's
	 * return-address chain so the origin of the error can be traced.
	 */
	if (CRASHDEBUG(1) || (pc->flags & DROP_CORE)) {
		SAVE_RETURN_ADDRESS(retaddr);
		console("error() trace: %lx => %lx => %lx => %lx\n",
			retaddr[3], retaddr[2], retaddr[1], retaddr[0]);
	}

	va_start(ap, fmt);
	(void)vsnprintf(buf, BUFSIZE, fmt, ap);
	va_end(ap);

	/* A NULL format with a fatal type is a "silent" fatal exit. */
	if (!fmt && FATAL_ERROR(type)) {
		fprintf(stdout, "\n");
		clean_exit(1);
	}

	end_of_line = FATAL_ERROR(type) && !(pc->flags & RUNTIME);

	/*
	 * A leading newline in the formatted message is consumed here and
	 * re-emitted ahead of the "command:" prefix instead.
	 */
	if ((new_line = (buf[0] == '\n')))
		shift_string_left(buf, 1);
	else if (pc->flags & PLEASE_WAIT)
		new_line = TRUE;

	/*
	 * CONT messages are continuation lines: indent them with spaces so
	 * they line up under the command name of the preceding message.
	 */
	if (type == CONT)
		spacebuf = space(strlen(pc->curcmd));
	else
		spacebuf = NULL;

	if (pc->stdpipe) {
		fprintf(pc->stdpipe, "%s%s%s %s%s",
			new_line ? "\n" : "",
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			type == WARNING ? "WARNING: " :
			type == NOTE ? "NOTE: " : "",
			buf);
		fflush(pc->stdpipe);
	} else {
		fprintf(stdout, "%s%s%s %s%s",
			new_line || end_of_line ? "\n" : "",
			type == WARNING ? "WARNING" :
			type == NOTE ? "NOTE" :
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			buf,
			end_of_line ? "\n" : "");
		fflush(stdout);
	}

	/*
	 * If command output is also being directed to a file or pipe (fp),
	 * echo the message there so it is intermingled with that output.
	 */
	if ((fp != stdout) && (fp != pc->stdpipe) && (fp != pc->tmpfile)) {
		fprintf(fp, "%s%s%s %s",
			new_line ? "\n" : "",
			type == WARNING ? "WARNING" :
			type == NOTE ? "NOTE" :
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			buf);
		fflush(fp);
	}

	/*
	 * DROP_CORE: deliberately force a segmentation fault (with the
	 * default SIGSEGV disposition restored) so a core file is produced.
	 */
	if ((pc->flags & DROP_CORE) && (type != NOTE)) {
		dump_trace(retaddr);
		SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL);
		drop_core("DROP_CORE flag set: forcing a segmentation fault\n");
	}

	switch (type)
	{
	case FATAL:
		if (pc->flags & IN_FOREACH)
			RESUME_FOREACH();
		/* FALLTHROUGH */

	case FATAL_RESTART:
		/* At runtime, longjmp back to the main command loop;
		 * during initialization, exit instead. */
		if (pc->flags & RUNTIME)
			RESTART();
		else {
			if (REMOTE())
				remote_exit();
			clean_exit(1);
		}

	default:
	case INFO:
	case NOTE:
	case WARNING:
		return FALSE;
	}
}

/*
 * Parse a line into tokens, populate the passed-in argv[] array, and return
 * the count of arguments found.  This function modifies the passed-string
 * by inserting a NULL character at the end of each token.  Expressions
 * encompassed by parentheses, and strings encompassed by apostrophes, are
 * collected into single tokens.
 */
int
parse_line(char *str, char *argv[])
{
	int i, j, k;
	int string;
	int expression;

	for (i = 0; i < MAXARGS; i++)
		argv[i] = NULL;

	clean_line(str);

	if (str == NULL || strlen(str) == 0)
		return(0);

	i = j = k = 0;
	string = expression = FALSE;

	/*
	 * Special handling for when the first character is a '"'.
	 */
	if (str[0] == '"') {
next:
		/* Scan to the closing quote (or end of string). */
		do {
			i++;
		} while ((str[i] != NULLCHAR) && (str[i] != '"'));

		switch (str[i])
		{
		case NULLCHAR:
			/* Unterminated quote: take the rest as one token. */
			argv[j] = &str[k];
			return j+1;

		case '"':
			/* Token is the text between the quotes. */
			argv[j++] = &str[k+1];
			str[i++] = NULLCHAR;
			if (str[i] == '"') {
				k = i;
				goto next;
			}
			break;
		}
	} else
		argv[j++] = str;

	while (TRUE) {
		if (j == MAXARGS)
			error(FATAL, "too many arguments in string!\n");

		/* Advance to the next whitespace or end of string. */
		while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) {
			i++;
		}

		switch (str[i])
		{
		case ' ':
		case '\t':
			/* Terminate the current token, then skip any run
			 * of whitespace before the next one. */
			str[i++] = NULLCHAR;
			while (str[i] == ' ' || str[i] == '\t') {
				i++;
			}

			if (str[i] == '"') {
				str[i] = ' ';
				string = TRUE;
				i++;
			}

			if (!string && str[i] == '(') {
				expression = TRUE;
			}

			if (str[i] != NULLCHAR && str[i] != '\n') {
				argv[j++] = &str[i];

				/* A quoted string or parenthesized
				 * expression is kept as a single token. */
				if (string) {
					string = FALSE;
					while (str[i] != '"' &&
					    str[i] != NULLCHAR)
						i++;
					if (str[i] == '"')
						str[i] = ' ';
				}
				if (expression) {
					expression = FALSE;
					while (str[i] != ')' &&
					    str[i] != NULLCHAR)
						i++;
				}
				break;
			}
			/* else fall through */
		case '\n':
			str[i] = NULLCHAR;
			/* keep falling... */
		case NULLCHAR:
			argv[j] = NULLCHAR;
			return(j);
		}
	}
}

/*
 * Defuse controversy re: extensions to ctype.h
 */
int
whitespace(int c)
{
	return ((c == ' ') || (c == '\t'));
}

int
ascii(int c)
{
	return ((c >= 0) && (c <= 0x7f));
}

/*
 * Strip line-ending whitespace and linefeeds.
 */
char *
strip_line_end(char *line)
{
	strip_linefeeds(line);
	strip_ending_whitespace(line);
	return(line);
}

/*
 * Strip line-beginning and line-ending whitespace and linefeeds.
 */
char *
clean_line(char *line)
{
	strip_beginning_whitespace(line);
	strip_linefeeds(line);
	strip_ending_whitespace(line);
	return(line);
}

/*
 * Strip line-ending linefeeds in a string.
 */
char *
strip_linefeeds(char *line)
{
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	p = &LASTCHAR(line);

	while (*p == '\n') {
		*p = NULLCHAR;
		if (--p < line)
			break;
	}

	return(line);
}

/*
 * Strip a specified line-ending character in a string.
*/
char *
strip_ending_char(char *line, char c)
{
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	p = &LASTCHAR(line);

	if (*p == c)
		*p = NULLCHAR;

	return(line);
}

/*
 * Strip a specified line-beginning character in a string.
 */
char *
strip_beginning_char(char *line, char c)
{
	if (line == NULL || strlen(line) == 0)
		return(line);

	if (FIRSTCHAR(line) == c)
		shift_string_left(line, 1);

	return(line);
}

/*
 * Strip line-ending whitespace.
 */
char *
strip_ending_whitespace(char *line)
{
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	p = &LASTCHAR(line);

	while (*p == ' ' || *p == '\t') {
		*p = NULLCHAR;
		if (p == line)
			break;
		p--;
	}

	return(line);
}

/*
 * Strip line-beginning whitespace.
 * NOTE(review): copies through a BUFSIZE stack buffer -- assumes the
 * incoming line is shorter than BUFSIZE; confirm at call sites.
 */
char *
strip_beginning_whitespace(char *line)
{
	char buf[BUFSIZE];
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	strcpy(buf, line);
	p = &buf[0];
	while (*p == ' ' || *p == '\t')
		p++;
	strcpy(line, p);

	return(line);
}

/*
 * End line at first comma found.
 */
char *
strip_comma(char *line)
{
	char *p;

	if ((p = strstr(line, ",")))
		*p = NULLCHAR;

	return(line);
}

/*
 * Strip the 0x from the beginning of a hexadecimal value string.
 */
char *
strip_hex(char *line)
{
	if (STRNEQ(line, "0x"))
		shift_string_left(line, 2);

	return(line);
}

/*
 * Turn a string into upper-case.
 */
char *
upper_case(char *s, char *buf)
{
	char *p1, *p2;

	p1 = s;
	p2 = buf;

	while (*p1) {
		*p2 = toupper(*p1);
		p1++, p2++;
	}

	*p2 = NULLCHAR;

	return(buf);
}

/*
 * Return pointer to first non-space/tab in a string.
 */
char *
first_nonspace(char *s)
{
	return(s + strspn(s, " \t"));
}

/*
 * Return pointer to first space/tab in a string.  If none are found,
 * return a pointer to the string terminating NULL.
 */
char *
first_space(char *s)
{
	return(s + strcspn(s, " \t"));
}

/*
 * Replace the first space/tab found in a string with a NULL character.
 */
char *
null_first_space(char *s)
{
	char *p1;

	p1 = first_space(s);
	if (*p1)
		*p1 = NULLCHAR;

	return s;
}

/*
 * Replace any instances of the characters in string c that are found in
 * string s with the character passed in r.
 */
char *
replace_string(char *s, char *c, char r)
{
	int i, j;

	for (i = 0; s[i]; i++) {
		for (j = 0; c[j]; j++) {
			if (s[i] == c[j])
				s[i] = r;
		}
	}

	return s;
}

/*
 * Prepend "insert" to the string in "where", in place.
 * NOTE(review): grows the string at "where" by strlen(insert) bytes --
 * assumes the caller's buffer has that much room; confirm at call sites.
 */
void
string_insert(char *insert, char *where)
{
	char *p;

	p = GETBUF(strlen(insert) + strlen(where) + 1);
	sprintf(p, "%s%s", insert, where);
	strcpy(where, p);
	FREEBUF(p);
}

/*
 * Find the rightmost instance of a substring in a string.
 */
char *
strstr_rightmost(char *s, char *lookfor)
{
	char *next, *last, *p;

	for (p = s, last = NULL; *p; p++) {
		if (!(next = strstr(p, lookfor)))
			break;
		last = p = next;
	}

	return last;
}

/*
 * Prints a string verbatim, allowing strings with % signs to be displayed
 * without printf conversions.
 */
void
print_verbatim(FILE *filep, char *line)
{
	int i;

	for (i = 0; i < strlen(line); i++) {
		fputc(line[i], filep);
		fflush(filep);
	}
}

/*
 * Double the first "%" in the string, in place, so a later printf-style
 * pass displays a literal percent sign.
 * NOTE(review): only the first "%" is handled, and the string grows by
 * one byte -- assumes the caller's buffer has room for it; confirm.
 */
char *
fixup_percent(char *s)
{
	char *p1;

	if ((p1 = strstr(s, "%")) == NULL)
		return s;

	s[strlen(s)+1] = NULLCHAR;
	memmove(p1+1, p1, strlen(p1));
	*p1 = '%';

	return s;
}

/*
 * Convert an indeterminate number string to either a hexadecimal or decimal
 * long value.  Translate with a bias towards decimal unless HEX_BIAS is set.
 */
ulong
stol(char *s, int flags, int *errptr)
{
	if ((flags & HEX_BIAS) && hexadecimal(s, 0))
		return(htol(s, flags, errptr));
	else {
		if (decimal(s, 0))
			return(dtol(s, flags, errptr));
		else if (hexadecimal(s, 0))
			return(htol(s, flags, errptr));
	}

	if (!(flags & QUIET))
		error(INFO, "not a valid number: %s\n", s);

	/*
	 * On failure, FAULT_ON_ERROR longjmps back to the main command
	 * loop via RESTART(); RETURN_ON_ERROR reports through errptr.
	 */
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Same as stol() above, but for unsigned long long values.
 */
ulonglong
stoll(char *s, int flags, int *errptr)
{
	if ((flags & HEX_BIAS) && hexadecimal(s, 0))
		return(htoll(s, flags, errptr));
	else {
		if (decimal(s, 0))
			return(dtoll(s, flags, errptr));
		else if (hexadecimal(s, 0))
			return(htoll(s, flags, errptr));
	}

	if (!(flags & QUIET))
		error(INFO, "not a valid number: %s\n", s);

	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Append a two-character string to a number to make 1, 2, 3 and 4 into
 * 1st, 2nd, 3rd, 4th, and so on...
 *
 * Note: the suffix is chosen from the final digit only, so e.g. 11, 12
 * and 13 come out as "11st", "12nd" and "13rd".
 */
char *
ordinal(ulong val, char *buf)
{
	char *p1;

	sprintf(buf, "%ld", val);
	p1 = &buf[strlen(buf)-1];

	switch (*p1)
	{
	case '1':
		strcat(buf, "st");
		break;
	case '2':
		strcat(buf, "nd");
		break;
	case '3':
		strcat(buf, "rd");
		break;
	default:
		strcat(buf, "th");
		break;
	}

	return buf;
}

/*
 * Convert a string into:
 *
 *  1. an evaluated expression if it's enclosed within parentheses.
 *  2. to a decimal value if the string is all decimal characters.
 *  3. to a hexadecimal value if the string is all hexadecimal characters.
 *  4. to a symbol value if the string is a known symbol.
 *
 * If HEX_BIAS is set, pass the value on to htol().
*/
ulong
convert(char *s, int flags, int *errptr, ulong numflag)
{
	struct syment *sp;

	/* Parenthesized expressions are evaluated first. */
	if ((numflag & NUM_EXPR) && can_eval(s))
		return(eval(s, flags, errptr));

	if ((flags & HEX_BIAS) && (numflag & NUM_HEX) && hexadecimal(s, 0))
		return(htol(s, flags, errptr));
	else {
		if ((numflag & NUM_DEC) && decimal(s, 0))
			return(dtol(s, flags, errptr));
		if ((numflag & NUM_HEX) && hexadecimal(s, 0))
			return(htol(s, flags, errptr));
	}

	/* Last resort: try the string as a known symbol name. */
	if ((sp = symbol_search(s)))
		return(sp->value);

	error(INFO, "cannot convert \"%s\"\n", s);

	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a hexadecimal long value.
 */
ulong
htol(char *s, int flags, int *errptr)
{
	long i, j;
	ulong n;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto htol_error;
	}

	if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
		s += 2;

	/*
	 * NOTE(review): strlen() returns size_t but is formatted below
	 * with %d -- harmless in practice on common ABIs, but %zu would
	 * be the correct specifier; flagged only, since a doc-only pass
	 * must not alter the message.
	 */
	if (strlen(s) > MAX_HEXADDR_STRLEN) {
		if (!(flags & QUIET))
			error(INFO,
			    "input string too large: \"%s\" (%d vs %d)\n",
				s, strlen(s), MAX_HEXADDR_STRLEN);
		goto htol_error;
	}

	for (n = i = 0; s[i] != 0; i++) {
		switch (s[i])
		{
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
			j = (s[i] - 'a') + 10;
			break;
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
			j = (s[i] - 'A') + 10;
			break;
		case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9': case '0':
			j = s[i] - '0';
			break;
		case 'x': case 'X': case 'h':
			/* 'x'/'X'/'h' radix markers are skipped wherever
			 * they appear in the string. */
			continue;
		default:
			if (!(flags & QUIET))
				error(INFO, "invalid input: \"%s\"\n", s);
			goto htol_error;
		}
		n = (16 * n) + j;
	}

	return(n);

htol_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return BADADDR;
}

/*
 * Convert a string to a hexadecimal unsigned long long value.
 * NOTE(review): on error this returns UNUSED while htol() above returns
 * BADADDR -- presumably deliberate, but confirm callers expect it.
 */
ulonglong
htoll(char *s, int flags, int *errptr)
{
	long i, j;
	ulonglong n;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto htoll_error;
	}

	if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
		s += 2;

	if (strlen(s) > LONG_LONG_PRLEN) {
		if (!(flags & QUIET))
			error(INFO,
			    "input string too large: \"%s\" (%d vs %d)\n",
				s, strlen(s), LONG_LONG_PRLEN);
		goto htoll_error;
	}

	for (n = i = 0; s[i] != 0; i++) {
		switch (s[i])
		{
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
			j = (s[i] - 'a') + 10;
			break;
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
			j = (s[i] - 'A') + 10;
			break;
		case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9': case '0':
			j = s[i] - '0';
			break;
		case 'x': case 'X': case 'h':
			continue;
		default:
			if (!(flags & QUIET))
				error(INFO, "invalid input: \"%s\"\n", s);
			goto htoll_error;
		}
		n = (16 * n) + j;
	}

	return(n);

htoll_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a decimal long value.
 */
ulong
dtol(char *s, int flags, int *errptr)
{
	ulong retval;
	char *p, *orig;
	int j;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto dtol_error;
	}

	if (strlen(s) == 0)
		goto dtol_error;

	/* Skip past any leading spaces; "orig" keeps the original start
	 * for error reporting. */
	p = orig = &s[0];
	while (*p++ == ' ')
		s++;

	/* Every remaining character must be a decimal digit. */
	for (j = 0; s[j] != '\0'; j++)
		if ((s[j] < '0' || s[j] > '9'))
			break ;

	if (s[j] != '\0') {
		if (!(flags & QUIET))
			error(INFO, "%s: \"%c\" is not a digit 0 - 9\n",
				orig, s[j]);
		goto dtol_error;
	} else if (sscanf(s, "%lu", &retval) != 1) {
		if (!(flags & QUIET))
			error(INFO, "invalid expression\n");
		goto dtol_error;
	}

	return(retval);

dtol_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a decimal long value.
*/
ulonglong
dtoll(char *s, int flags, int *errptr)
{
	ulonglong retval;
	char *p, *orig;
	int j;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto dtoll_error;
	}

	if (strlen(s) == 0)
		goto dtoll_error;

	/* Skip past any leading spaces; "orig" keeps the original start
	 * for error reporting. */
	p = orig = &s[0];
	while (*p++ == ' ')
		s++;

	/* Every remaining character must be a decimal digit. */
	for (j = 0; s[j] != '\0'; j++)
		if ((s[j] < '0' || s[j] > '9'))
			break ;

	if (s[j] != '\0') {
		if (!(flags & QUIET))
			error(INFO, "%s: \"%c\" is not a digit 0 - 9\n",
				orig, s[j]);
		goto dtoll_error;
	} else if (sscanf(s, "%llu", &retval) != 1) {
		if (!(flags & QUIET))
			error(INFO, "invalid expression\n");
		goto dtoll_error;
	}

	return (retval);

dtoll_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return ((ulonglong)(-1));
}

/*
 * Convert a string to a decimal integer value.
 */
unsigned int
dtoi(char *s, int flags, int *errptr)
{
	unsigned int retval;
	char *p;
	int j;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto dtoi_error;
	}

	/* Skip past any leading spaces. */
	p = &s[0];
	while (*p++ == ' ')
		s++;

	for (j = 0; s[j] != '\0'; j++)
		if ((s[j] < '0' || s[j] > '9'))
			break ;

	if (s[j] != '\0' || (sscanf(s, "%d", (int *)&retval) != 1)) {
		if (!(flags & QUIET))
			error(INFO, "%s: \"%c\" is not a digit 0 - 9\n",
				s, s[j]);
		goto dtoi_error;
	}

	return(retval);

dtoi_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return((unsigned int)(-1));
}

/*
 * Determine whether a string contains only decimal characters.
 * If count is non-zero, limit the search to count characters.
 *
 * Note: when count is zero, the caller's string is end-trimmed in place
 * by strip_line_end().
 */
int
decimal(char *s, int count)
{
	char *p;
	int cnt, digits;

	if (!count) {
		strip_line_end(s);
		cnt = 0;
	} else
		cnt = count;

	for (p = &s[0], digits = 0; *p; p++) {
		switch(*p)
		{
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
			digits++;
			/* FALLTHROUGH -- spaces are tolerated, too */
		case ' ':
			break;
		default:
			return FALSE;
		}

		if (count && (--cnt == 0))
			break;
	}

	return (digits ? TRUE : FALSE);
}

/*
 * Extract a hexadecimal number from a string.  If first_instance is FALSE,
 * and two possibilities are found, a fatal error results.
 */
int
extract_hex(char *s, ulong *result, char stripchar, ulong first_instance)
{
	int i, found;
	char *arglist[MAXARGS];
	int argc;
	ulong value;
	char *buf;

	/* Tokenize a scratch copy so the caller's string is untouched.
	 * NOTE(review): GETBUF(strlen(s)) leaves no room for the NUL that
	 * strcpy() appends -- presumably GETBUF rounds the size up;
	 * confirm against its implementation. */
	buf = GETBUF(strlen(s));
	strcpy(buf, s);
	argc = parse_line(buf, arglist);

	for (i = found = value = 0; i < argc; i++) {
		if (stripchar)
			strip_ending_char(arglist[i], stripchar);
		if (hexadecimal(arglist[i], 0)) {
			if (found) {
				FREEBUF(buf);
				error(FATAL,
				    "two hexadecimal args in: \"%s\"\n",
					strip_linefeeds(s));
			}
			value = htol(arglist[i], FAULT_ON_ERROR, NULL);
			found = TRUE;
			if (first_instance)
				break;
		}
	}

	FREEBUF(buf);

	if (found) {
		*result = value;
		return TRUE;
	}

	return FALSE;
}

/*
 * Determine whether a string contains only ASCII characters.
 */
int
ascii_string(char *s)
{
	char *p;

	for (p = &s[0]; *p; p++) {
		if (!ascii(*p))
			return FALSE;
	}

	return TRUE;
}

/*
 * Check whether a string contains only printable ASCII characters.
 */
int
printable_string(char *s)
{
	char *p;

	for (p = &s[0]; *p; p++) {
		if (!isprint(*p))
			return FALSE;
	}

	return TRUE;
}

/*
 * Determine whether a string contains only hexadecimal characters.
 * If count is non-zero, limit the search to count characters.
*/ int hexadecimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': digits++; case 'x': case 'X': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Determine whether a string contains only hexadecimal characters. * and cannot be construed as a decimal number. * If count is non-zero, limit the search to count characters. */ int hexadecimal_only(char *s, int count) { char *p; int cnt, only; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; only = 0; for (p = &s[0]; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'x': case 'X': only++; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return only; } /* * Clean a command argument that has an obvious but ignorable error. * The first one is an attached comma to a number, that usually is the * result of a cut-and-paste of an address from a structure display. * The second on is an attached colon to a number, usually from a * cut-and-paste of a memory dump. * Add more when they become annoynance. * * It presumes args[optind] is the argument being tinkered with, and * always returns TRUE for convenience of use. 
*/ int clean_arg(void) { char buf[BUFSIZE]; if (LASTCHAR(args[optind]) == ',' || LASTCHAR(args[optind]) == ':') { strcpy(buf, args[optind]); LASTCHAR(buf) = NULLCHAR; if (IS_A_NUMBER(buf)) LASTCHAR(args[optind]) = NULLCHAR; } return TRUE; } /* * Translate a hexadecimal string into its ASCII components. */ void cmd_ascii(void) { int i; ulonglong value; char *s; int c, prlen, bytes; optind = 1; if (!args[optind]) { fprintf(fp, "\n"); fprintf(fp, " 0 1 2 3 4 5 6 7\n"); fprintf(fp, " +-------------------------------\n"); fprintf(fp, " 0 | NUL DLE SP 0 @ P ' p\n"); fprintf(fp, " 1 | SOH DC1 ! 1 A Q a q\n"); fprintf(fp, " 2 | STX DC2 %c 2 B R b r\n", 0x22); fprintf(fp, " 3 | ETX DC3 # 3 C S c s\n"); fprintf(fp, " 4 | EOT DC4 $ 4 D T d t\n"); fprintf(fp, " 5 | ENQ NAK %c 5 E U e u\n", 0x25); fprintf(fp, " 6 | ACK SYN & 6 F V f v\n"); fprintf(fp, " 7 | BEL ETB ` 7 G W g w\n"); fprintf(fp, " 8 | BS CAN ( 8 H X h x\n"); fprintf(fp, " 9 | HT EM ) 9 I Y i y\n"); fprintf(fp, " A | LF SUB * : J Z j z\n"); fprintf(fp, " B | VT ESC + ; K [ k {\n"); fprintf(fp, " C | FF FS , < L %c l |\n", 0x5c); fprintf(fp, " D | CR GS _ = M ] m }\n"); fprintf(fp, " E | SO RS . > N ^ n ~\n"); fprintf(fp, " F | SI US / ? 
O - o DEL\n"); fprintf(fp, "\n"); return; } while (args[optind]) { s = args[optind]; if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > LONG_PRLEN) { prlen = LONG_LONG_PRLEN; bytes = sizeof(long long); } else { prlen = LONG_PRLEN; bytes = sizeof(long); } value = htoll(s, FAULT_ON_ERROR, NULL); fprintf(fp, "%.*llx: ", prlen, value); for (i = 0; i < bytes; i++) { c = (value >> (8*i)) & 0xff; if ((c >= 0x20) && (c < 0x7f)) { fprintf(fp, "%c", (char)c); continue; } if (c > 0x7f) { fprintf(fp, "<%02x>", c); continue; } switch (c) { case 0x0: fprintf(fp, ""); break; case 0x1: fprintf(fp, ""); break; case 0x2: fprintf(fp, ""); break; case 0x3: fprintf(fp, ""); break; case 0x4: fprintf(fp, ""); break; case 0x5: fprintf(fp, ""); break; case 0x6: fprintf(fp, ""); break; case 0x7: fprintf(fp, ""); break; case 0x8: fprintf(fp, ""); break; case 0x9: fprintf(fp, ""); break; case 0xa: fprintf(fp, ""); break; case 0xb: fprintf(fp, ""); break; case 0xc: fprintf(fp, ""); break; case 0xd: fprintf(fp, ""); break; case 0xe: fprintf(fp, ""); break; case 0xf: fprintf(fp, ""); break; case 0x10: fprintf(fp, ""); break; case 0x11: fprintf(fp, ""); break; case 0x12: fprintf(fp, ""); break; case 0x13: fprintf(fp, ""); break; case 0x14: fprintf(fp, ""); break; case 0x15: fprintf(fp, ""); break; case 0x16: fprintf(fp, ""); break; case 0x17: fprintf(fp, ""); break; case 0x18: fprintf(fp, ""); break; case 0x19: fprintf(fp, ""); break; case 0x1a: fprintf(fp, ""); break; case 0x1b: fprintf(fp, ""); break; case 0x1c: fprintf(fp, ""); break; case 0x1d: fprintf(fp, ""); break; case 0x1e: fprintf(fp, ""); break; case 0x1f: fprintf(fp, ""); break; case 0x7f: fprintf(fp, ""); break; } } fprintf(fp, "\n"); optind++; } } /* * Counts number of leading whitespace characters in a string. */ int count_leading_spaces(char *s) { return (strspn(s, " \t")); } /* * Prints the requested number of spaces. 
*/ void pad_line(FILE *filep, int cnt, char c) { int i; for (i = 0; i < cnt; i++) fputc(c, filep); } /* * Returns appropriate number of inter-field spaces in a usable string. * MINSPACE is defined as -100, but implies the minimum space between two * fields. Currently this can be either one or two spaces, depending upon * the architecture. Since the mininum space must be at least 1, MINSPACE, * MINSPACE-1 and MINSPACE+1 are all valid, special numbers. Otherwise * the space count must be greater than or equal to 0. * * If the cnt request is greater than SPACES, a dynamic buffer is * allocated, and normal buffer garbage collection will return it * back to the pool. */ char * space(int cnt) { #define SPACES 40 static char spacebuf[SPACES+1] = { 0 }; int i; char *bigspace; if (cnt > SPACES) { bigspace = GETBUF(cnt); for (i = 0; i < cnt; i++) bigspace[i] = ' '; bigspace[i] = NULLCHAR; return bigspace; } if (!strlen(spacebuf)) { for (i = 0; i < SPACES; i++) spacebuf[i] = ' '; spacebuf[i] = NULLCHAR; } if (cnt < (MINSPACE-1)) error(FATAL, "illegal spacing request: %d\n", cnt); if ((cnt > MINSPACE+1) && (cnt < 0)) error(FATAL, "illegal spacing request\n"); switch (cnt) { case (MINSPACE-1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES]); /* NULL */ else return (&spacebuf[SPACES-1]); /* 1 space */ case MINSPACE: if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-1]); /* 1 space */ else return (&spacebuf[SPACES-2]); /* 2 spaces */ case (MINSPACE+1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-2]); /* 2 spaces */ else return (&spacebuf[SPACES-3]); /* 3 spaces */ default: return (&spacebuf[SPACES-cnt]); /* as requested */ } } /* * Determine whether substring s1, with length len, and contained within * string s, is surrounded by characters. If len is 0, calculate * it. 
*/ int bracketed(char *s, char *s1, int len) { char *s2; if (!len) { if (!(s2 = strstr(s1, ">"))) return FALSE; len = s2-s1; } if (((s1-s) < 1) || (*(s1-1) != '<') || ((s1+len) >= &s[strlen(s)]) || (*(s1+len) != '>')) return FALSE; return TRUE; } /* * Counts the number of a specified character in a string. */ int count_chars(char *s, char c) { char *p; int count; if (!s) return 0; count = 0; for (p = s; *p; p++) { if (*p == c) count++; } return count; } /* * Counts the number of a specified characters in a buffer. */ long count_buffer_chars(char *bufptr, char c, long len) { long i, cnt; for (i = cnt = 0; i < len; i++, bufptr++) { if (*bufptr == c) cnt++; } return cnt; } /* * Concatenates the tokens in the global args[] array into one string, * separating each token with one space. If the no_options flag is set, * don't include any args beginning with a dash character. */ char * concat_args(char *buf, int arg, int no_options) { int i; BZERO(buf, BUFSIZE); for (i = arg; i < argcnt; i++) { if (no_options && STRNEQ(args[i], "-")) continue; strcat(buf, args[i]); strcat(buf, " "); } return(strip_ending_whitespace(buf)); } /* * Shifts the contents of a string to the left by cnt characters, * disposing the leftmost characters. */ char * shift_string_left(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s, s+cnt, (origlen-cnt)); *(s+(origlen-cnt)) = NULLCHAR; return(s); } /* * Shifts the contents of a string to the right by cnt characters, * inserting space characters. (caller confirms space is available) */ char * shift_string_right(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s+cnt, s, origlen); s[origlen+cnt] = NULLCHAR; return(memset(s, ' ', cnt)); } /* * Create a string in a buffer of a given size, centering, or justifying * left or right as requested. If the opt argument is used, then the string * is created with its string/integer value. 
If opt is NULL, then the * string is already in contained in string s (not justified). Note that * flag LONGLONG_HEX implies that opt is a ulonglong pointer to the * actual value. */ char * mkstring(char *s, int size, ulong flags, const char *opt) { int len; int extra; int left; int right; switch (flags & (LONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX|ZERO_FILL)) { case LONG_DEC: sprintf(s, "%lu", (ulong)opt); break; case LONG_HEX: sprintf(s, "%lx", (ulong)opt); break; case (LONG_HEX|ZERO_FILL): if (VADDR_PRLEN == 8) sprintf(s, "%08lx", (ulong)opt); else if (VADDR_PRLEN == 16) sprintf(s, "%016lx", (ulong)opt); break; case INT_DEC: sprintf(s, "%u", (uint)((ulong)opt)); break; case INT_HEX: sprintf(s, "%x", (uint)((ulong)opt)); break; case LONGLONG_HEX: sprintf(s, "%llx", *((ulonglong *)opt)); break; default: if (opt) strcpy(s, opt); break; } /* * At this point, string s has the string to be justified, * and has room to work with. The relevant flags from this * point on are of CENTER, LJUST and RJUST. If the length * of string s is already larger than the requested size, * just return it as is. */ len = strlen(s); if (size <= len) return(s); extra = size - len; if (flags & CENTER) { /* * If absolute centering is not possible, justify the * string as requested -- or to the left if no justify * argument was passed in. */ if (extra % 2) { switch (flags & (LJUST|RJUST)) { default: case LJUST: right = (extra/2) + 1; left = extra/2; break; case RJUST: right = extra/2; left = (extra/2) + 1; break; } } else left = right = extra/2; shift_string_right(s, left); len = strlen(s); memset(s + len, ' ', right); s[len + right] = NULLCHAR; return(s); } if (flags & LJUST) { len = strlen(s); memset(s + len, ' ', extra); s[len + extra] = NULLCHAR; } else if (flags & RJUST) shift_string_right(s, extra); return(s); } /* * Prints the requested number of BACKSPACE characters. 
*/ void backspace(int cnt) { int i; for (i = 0; i < cnt; i++) fprintf(fp, "\b"); } /* * Set/display process context or internal variables. Processes are set * by their task or PID number, or to the panic context with the -p flag. * Internal variables may be viewed or changed, depending whether an argument * follows the variable name. If no arguments are entered, the current * process context is dumped. The current set of variables and their * acceptable settings are: * * debug "on", "off", or any number. "on" sets it to a value of 1. * hash "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * scroll "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * silent "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * refresh "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * sym regular filename * console device filename * radix 10 or 16 * core (no arg) drop core when error() is called. * vi (no arg) set editing mode to vi (from .rc file only). * emacs (no arg) set editing mode to emacs (from .rc file only). * namelist kernel name (from .rc file only). * dumpfile dumpfile name (from .rc file only). * * gdb variable settings not changeable by gdb's "set" command: * * print_max value (default is 200). */ void cmd_set(void) { int i, c; ulong value; int cpu, runtime, from_rc_file; char buf[BUFSIZE]; char *extra_message; struct task_context *tc; struct syment *sp; #define defer() do { } while (0) #define already_done() do { } while (0) #define ignore() do { } while (0) extra_message = NULL; runtime = pc->flags & RUNTIME ? TRUE : FALSE; from_rc_file = pc->curcmd_flags & FROM_RCFILE ? 
TRUE : FALSE; while ((c = getopt(argcnt, args, "pvc:a:")) != EOF) { switch(c) { case 'c': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { error(INFO, "not allowed on a live system\n"); argerrs++; break; } cpu = dtoi(optarg, FAULT_ON_ERROR, NULL); set_cpu(cpu); return; case 'p': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { set_context(NO_TASK, pc->program_pid); show_context(CURRENT_CONTEXT()); return; } if (!tt->panic_task) { error(INFO, "no panic task found!\n"); return; } set_context(tt->panic_task, NO_PID); show_context(CURRENT_CONTEXT()); return; case 'v': if (!runtime) return; show_options(); return; case 'a': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) error(FATAL, "-a option not allowed on live systems\n"); switch (str_to_context(optarg, &value, &tc)) { case STR_PID: if ((i = TASKS_PER_PID(value)) > 1) error(FATAL, "pid %d has %d tasks: " "use a task address\n", value, i); break; case STR_TASK: break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); } cpu = tc->processor; tt->active_set[cpu] = tc->task; if (tt->panic_threads[cpu]) tt->panic_threads[cpu] = tc->task; fprintf(fp, "\"%s\" task %lx has been marked as the active task on cpu %d\n", tc->comm, tc->task, cpu); return; default: argerrs++; break; } } if (argerrs) { if (runtime) cmd_usage(pc->curcmd, SYNOPSIS); return; } if (!args[optind]) { if (XEN_HYPER_MODE()) error(INFO, "requires an option with the Xen hypervisor\n"); else if (pc->flags & MINIMAL_MODE) show_options(); else if (runtime) show_context(CURRENT_CONTEXT()); return; } while (args[optind]) { if (STREQ(args[optind], "debug")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->debug = 1; else if (STREQ(args[optind], "off")) pc->debug = 0; else if 
(IS_A_NUMBER(args[optind])) pc->debug = stol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "debug: %ld\n", pc->debug); set_lkcd_debug(pc->debug); set_vas_debug(pc->debug); return; } else if (STREQ(args[optind], "hash")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->flags |= HASH; else if (STREQ(args[optind], "off")) pc->flags &= ~HASH; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= HASH; else pc->flags &= ~HASH; } else goto invalid_set_command; } if (runtime) fprintf(fp, "hash: %s\n", pc->flags & HASH ? "on" : "off"); return; } else if (STREQ(args[optind], "unwind")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else if (STREQ(args[optind], "off")) { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "unwind: %s\n", kt->flags & DWARF_UNWIND ? 
"on" : "off"); return; } else if (STREQ(args[optind], "refresh")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) tt->flags |= TASK_REFRESH; else if (STREQ(args[optind], "off")) { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) tt->flags |= TASK_REFRESH; else { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); return; } else if (STREQ(args[optind], "gdb")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else if (STREQ(args[optind], "off")) pc->flags2 &= ~GDB_CMD_MODE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else pc->flags2 &= ~GDB_CMD_MODE; } else goto invalid_set_command; set_command_prompt(pc->flags2 & GDB_CMD_MODE ? "gdb> " : NULL); } if (runtime) fprintf(fp, "gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? 
"on" : "off"); return; } else if (STREQ(args[optind], "scroll")) { if (args[optind+1] && pc->scroll_command) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) pc->flags |= SCROLL; else if (STREQ(args[optind], "off")) pc->flags &= ~SCROLL; else if (STREQ(args[optind], "more")) pc->scroll_command = SCROLL_MORE; else if (STREQ(args[optind], "less")) pc->scroll_command = SCROLL_LESS; else if (STREQ(args[optind], "CRASHPAGER")) { if (CRASHPAGER_valid()) pc->scroll_command = SCROLL_CRASHPAGER; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= SCROLL; else pc->flags &= ~SCROLL; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "scroll: %s ", pc->flags & SCROLL ? "on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } } return; } else if (STREQ(args[optind], "silent")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else if (STREQ(args[optind], "off")) pc->flags &= ~SILENT; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else pc->flags &= ~SILENT; } else goto invalid_set_command; if (!(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); } else if (runtime && !(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); return; } else if (STREQ(args[optind], "console")) { int assignment; if (args[optind+1]) { create_console_device(args[optind+1]); optind++; assignment = optind; } else assignment = 0; if (runtime) { fprintf(fp, "console: "); if (pc->console) fprintf(fp, "%s\n", pc->console); else { if (assignment) fprintf(fp, "assignment to %s failed\n", args[assignment]); else 
fprintf(fp, "not set\n"); } } return; } else if (STREQ(args[optind], "core")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) pc->flags |= DROP_CORE; else if (STREQ(args[optind], "off")) pc->flags &= ~DROP_CORE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= DROP_CORE; else pc->flags &= ~DROP_CORE; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "core: %s on error message)\n", pc->flags & DROP_CORE ? "on (drop core" : "off (do NOT drop core"); } return; } else if (STREQ(args[optind], "radix")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (STREQ(args[optind], "10") || STRNEQ(args[optind], "dec") || STRNEQ(args[optind], "ten")) pc->output_radix = 10; else if (STREQ(args[optind], "16") || STRNEQ(args[optind], "hex") || STRNEQ(args[optind], "six")) pc->output_radix = 16; else goto invalid_set_command; } if (runtime) { sprintf(buf, "set output-radix %d", pc->output_radix); gdb_pass_through(buf, NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? 
"decimal" : "hex"); } return; } else if (STREQ(args[optind], "hex")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 16; gdb_pass_through("set output-radix 16", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 16 (hex)\n"); } return; } else if (STREQ(args[optind], "dec")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 10; gdb_pass_through("set output-radix 10", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 10 (decimal)\n"); } return; } else if (STREQ(args[optind], "edit")) { if (args[optind+1]) { if (runtime && !from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "vi")) pc->editing_mode = "vi"; else if (STREQ(args[optind], "emacs")) pc->editing_mode = "emacs"; else goto invalid_set_command; } if (runtime) fprintf(fp, "edit: %s\n", pc->editing_mode); return; } else if (STREQ(args[optind], "vi")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "vi"; return; } else if (STREQ(args[optind], "emacs")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change %s editing mode during runtime\n", pc->editing_mode); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "emacs"; return; } else if (STREQ(args[optind], "print_max")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (decimal(args[optind], 0)) *gdb_print_max = atoi(args[optind]); else if (hexadecimal(args[optind], 0)) *gdb_print_max = (unsigned int) htol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "print_max: %d\n", *gdb_print_max); return; } else if (STREQ(args[optind], "scope")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (can_eval(args[optind])) value = eval(args[optind], 
FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) value = htol(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) value = sp->value; else goto invalid_set_command; if (runtime) { if (gdb_set_crash_scope(value, args[optind])) pc->scope = value; else return; } } if (runtime) { fprintf(fp, "scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); } return; } else if (STREQ(args[optind], "null-stop")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_stop_print_at_null = 1; else if (STREQ(args[optind], "off")) *gdb_stop_print_at_null = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_stop_print_at_null = 1; else *gdb_stop_print_at_null = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); return; } else if (STREQ(args[optind], "print_array")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_prettyprint_arrays = 1; else if (STREQ(args[optind], "off")) *gdb_prettyprint_arrays = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_prettyprint_arrays = 1; else *gdb_prettyprint_arrays = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "print_array: %s\n", *gdb_prettyprint_arrays ? 
"on" : "off"); return; } else if (STREQ(args[optind], "namelist")) { optind++; if (!runtime && args[optind]) { if (!is_elf_file(args[optind])) error(FATAL, "%s: not a kernel namelist (from .%src file)\n", args[optind], pc->program_name); if ((pc->namelist = (char *) malloc(strlen(args[optind])+1)) == NULL) { error(INFO, "cannot malloc memory for namelist: %s: %s\n", args[optind], strerror(errno)); } else strcpy(pc->namelist, args[optind]); } if (runtime) fprintf(fp, "namelist: %s\n", pc->namelist); return; } else if (STREQ(args[optind], "free")) { if (!runtime) defer(); else fprintf(fp, "%d pages freed\n", dumpfile_memory(DUMPFILE_FREE_MEM)); return; } else if (STREQ(args[optind], "data_debug")) { pc->flags |= DATADEBUG; return; } else if (STREQ(args[optind], "zero_excluded")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else if (STREQ(args[optind], "off")) { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } } else goto invalid_set_command; } if (runtime) fprintf(fp, "zero_excluded: %s\n", (*diskdump_flags & ZERO_EXCLUDED) || sadump_is_zero_excluded() ? "on" : "off"); return; } else if (STREQ(args[optind], "offline")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "show")) pc->flags2 &= ~OFFLINE_HIDE; else if(STREQ(args[optind], "hide")) pc->flags2 |= OFFLINE_HIDE; else goto invalid_set_command; } if (runtime) fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? 
"hide" : "show"); return; } else if (XEN_HYPER_MODE()) { error(FATAL, "invalid argument for the Xen hypervisor\n"); } else if (pc->flags & MINIMAL_MODE) { error(FATAL, "invalid argument in minimal mode\n"); } else if (runtime) { ulong pid, task; switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: pid = value; task = NO_TASK; if (set_context(task, pid)) show_context(CURRENT_CONTEXT()); break; case STR_TASK: task = value; pid = NO_PID; if (set_context(task, pid)) show_context(CURRENT_CONTEXT()); break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n", args[optind]); break; } } else console("set: ignoring \"%s\"\n", args[optind]); optind++; } return; invalid_set_command: sprintf(buf, "invalid command"); if (!runtime) sprintf(&buf[strlen(buf)], " in .%src file", pc->program_name); strcat(buf, ": "); for (i = 0; i < argcnt; i++) sprintf(&buf[strlen(buf)], "%s ", args[i]); strcat(buf, "\n"); if (extra_message) strcat(buf, extra_message); error(runtime ? FATAL : INFO, buf); #undef defer #undef already_done #undef ignore } /* * Display the set of settable internal variables. */ static void show_options(void) { char buf[BUFSIZE]; fprintf(fp, " scroll: %s ", pc->flags & SCROLL ? "on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } fprintf(fp, " radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? "decimal" : pc->output_radix == 16 ? "hexadecimal" : "unknown"); fprintf(fp, " refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); fprintf(fp, " print_max: %d\n", *gdb_print_max); fprintf(fp, " print_array: %s\n", *gdb_prettyprint_arrays ? "on" : "off"); fprintf(fp, " console: %s\n", pc->console ? 
pc->console : "(not assigned)"); fprintf(fp, " debug: %ld\n", pc->debug); fprintf(fp, " core: %s\n", pc->flags & DROP_CORE ? "on" : "off"); fprintf(fp, " hash: %s\n", pc->flags & HASH ? "on" : "off"); fprintf(fp, " silent: %s\n", pc->flags & SILENT ? "on" : "off"); fprintf(fp, " edit: %s\n", pc->editing_mode); fprintf(fp, " namelist: %s\n", pc->namelist); fprintf(fp, " dumpfile: %s\n", pc->dumpfile); fprintf(fp, " unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); fprintf(fp, " zero_excluded: %s\n", *diskdump_flags & ZERO_EXCLUDED ? "on" : "off"); fprintf(fp, " null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); fprintf(fp, " gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? "on" : "off"); fprintf(fp, " scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? "hide" : "show"); } /* * Evaluate an expression, which can consist of a single symbol, single value, * or an expression consisting of two values and an operator. If the * expression contains redirection characters, the whole expression must * be enclosed with parentheses. The result is printed in decimal, hex, * octal and binary. Input number values can only be hex or decimal, with * a bias towards decimal (use 0x when necessary). */ void cmd_eval(void) { int flags; int bitflag, longlongflag, longlongflagforce; struct number_option nopt; char buf1[BUFSIZE]; /* * getopt() is not used to avoid confusion with minus sign. 
 */
	optind = 1;
	bitflag = 0;
	longlongflag = longlongflagforce = 0;
	BZERO(&nopt, sizeof(struct number_option));

	/*
	 * Hand-parse the -l (force long long) and -b (show bits) options,
	 * which may be entered in either order or combined as -lb/-bl.
	 */
	if (STREQ(args[optind], "-lb") || STREQ(args[optind], "-bl")) {
		longlongflagforce++;
		bitflag++;
		optind++;
	} else if (STREQ(args[optind], "-l")) {
		longlongflagforce++;
		optind++;
		if (STREQ(args[optind], "-b") && args[optind+1]) {
			optind++;
			bitflag++;
		}
	} else if (STREQ(args[optind], "-b")) {
		if (STREQ(args[optind+1], "-l")) {
			if (args[optind+2]) {
				bitflag++;
				longlongflagforce++;
				optind += 2;
			} else
				cmd_usage(pc->curcmd, SYNOPSIS);
		} else if (args[optind+1])  {
			bitflag++;
			optind++;
		}
	}

	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	/*
	 * On 32-bit kernels, evaluate in long long mode by default so that
	 * 64-bit intermediate values are not truncated; the result is
	 * narrowed back at print time unless it really needs 64 bits.
	 */
	longlongflag = BITS32() ? TRUE : FALSE;
	flags = longlongflag ? (LONG_LONG|RETURN_ON_ERROR) : FAULT_ON_ERROR;
	if (!BITS32())
		longlongflagforce = 0;

	BZERO(buf1, BUFSIZE);
	buf1[0] = '(';

	/*
	 * A single parenthesized argument is evaluated as-is; otherwise all
	 * remaining arguments are concatenated into one parenthesized
	 * expression and evaluated as a whole.
	 */
	while (args[optind]) {
		if (*args[optind] == '(') {
			if (eval_common(args[optind], flags, NULL, &nopt))
				print_number(&nopt, bitflag, longlongflagforce);
			else
				error(FATAL, "invalid expression: %s\n",
					args[optind]);
			return;
		} else {
			strcat(buf1, args[optind]);
			strcat(buf1, " ");
		}
		optind++;
	}

	clean_line(buf1);
	strcat(buf1, ")");

	if (eval_common(buf1, flags, NULL, &nopt))
		print_number(&nopt, bitflag, longlongflagforce);
	else
		error(FATAL, "invalid expression: %s\n", buf1);
}

/*
 * Pre-check a string for eval-worthiness.  This allows callers to avoid
 * having to encompass a non-whitespace expression with parentheses.
 * Note that the data being evaluated is not error-checked here, but
 * rather that it exists in the proper format.
 *
 * Returns TRUE if the string looks like something eval() could parse,
 * FALSE otherwise.
 */
int
can_eval(char *s)
{
	char *op;
	char *element1, *element2;
	char work[BUFSIZE];

	/*
	 * If we've got a () pair containing any sort of stuff in between,
	 * then presume it's eval-able.  It might contain crap, but it
	 * should be sent to eval() regardless.
	 */
	if ((FIRSTCHAR(s) == '(') &&
	    (count_chars(s, '(') == 1) &&
	    (count_chars(s, ')') == 1) &&
	    (strlen(s) > 2) &&
	    (LASTCHAR(s) == ')'))
		return TRUE;

	/*
	 * If the string contains any of the operators except the shifters,
	 * and has any kind of data on either side, it's also eval-able.
	 */
	strcpy(work, s);
	if (!(op = strpbrk(work, "><+-&|*/%^")))
		return FALSE;

	element1 = &work[0];
	*op = NULLCHAR;
	element2 = op+1;

	if (!strlen(element1) || !strlen(element2))
		return FALSE;

	return TRUE;
}

/*
 * Evaluate an expression involving two values and an operator.
 */
#define OP_ADD   (1)
#define OP_SUB   (2)
#define OP_AND   (3)
#define OP_OR    (4)
#define OP_MUL   (5)
#define OP_DIV   (6)
#define OP_MOD   (7)
#define OP_SL    (8)
#define OP_SR    (9)
#define OP_EXOR  (10)
#define OP_POWER (11)

/*
 * Evaluate an expression, returning its value as a ulong.  On a parse
 * failure, either abort (FAULT_ON_ERROR) or set *errptr and return
 * UNUSED (RETURN_ON_ERROR).
 */
ulong
eval(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (eval_common(s, flags, errptr, &nopt)) {
		return(nopt.num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			error(FATAL, "invalid expression: %s\n", s);
		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 * Long long variant of eval(); on 32-bit kernels LONG_LONG mode is
 * forced so no truncation occurs.
 */
ulonglong
evall(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (BITS32())
		flags |= LONG_LONG;

	if (eval_common(s, flags, errptr, &nopt)) {
		return(nopt.ll_num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			error(FATAL, "invalid expression: %s\n", s);
		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 * Common workhorse for eval()/evall()/cmd_eval(): parse an optionally
 * parenthesized "value [op value]" expression into np->num/np->ll_num.
 * Each operand may be a symbol name or a number with an optional
 * K/M/G multiplier; a leading '-' on the whole expression is treated
 * as subtraction from zero.  Returns TRUE on success, FALSE if the
 * string is malformed.
 */
int
eval_common(char *s, int flags, int *errptr, struct number_option *np)
{
	char *p1, *p2;
	char *op, opcode;
	ulong value1;
	ulong value2;
	ulonglong ll_value1;
	ulonglong ll_value2;
	char work[BUFSIZE];
	char *element1;
	char *element2;
	struct syment *sp;

	opcode = 0;
	value1 = value2 = 0;
	ll_value1 = ll_value2 = 0;

	/*
	 * At most one balanced, outermost () pair is accepted; the
	 * interior is copied into the work buffer.
	 */
	if (strstr(s, "(") || strstr(s, ")")) {
		p1 = s;
		if (*p1 != '(')
			goto malformed;
		if (LASTCHAR(s) != ')')
			goto malformed;
		p2 = &LASTCHAR(s);
		if (strstr(s, ")") != p2)
			goto malformed;
		strcpy(work, p1+1);
		LASTCHAR(work) = NULLCHAR;
		if (strstr(work, "(") || strstr(work, ")"))
			goto malformed;
	} else
		strcpy(work, s);

	/* Rewrite a leading unary minus as "0 - ..." */
	if (work[0] == '-') {
		shift_string_right(work, 1);
		work[0] = '0';
	}

	/* No operator at all: it's a single value. */
	if (!(op = strpbrk(work, "#><+-&|*/%^"))) {
		if (calculate(work, &value1, &ll_value1,
		    flags & (HEX_BIAS|LONG_LONG))) {
			if (flags & LONG_LONG) {
				np->ll_num = ll_value1;
				if (BITS32() && (ll_value1 > 0xffffffff))
					np->retflags |= LONG_LONG;
				return TRUE;
			} else {
				np->num = value1;
				return TRUE;
			}
		}
		goto malformed;
	}

	switch (*op)
	{
	case '+':
		opcode = OP_ADD;
		break;
	case '-':
		opcode = OP_SUB;
		break;
	case '&':
		opcode = OP_AND;
		break;
	case '|':
		opcode = OP_OR;
		break;
	case '*':
		opcode = OP_MUL;
		break;
	case '%':
		opcode = OP_MOD;
		break;
	case '/':
		opcode = OP_DIV;
		break;
	case '<':
		/* Shifts must be doubled: "<<" */
		if (*(op+1) != '<')
			goto malformed;
		opcode = OP_SL;
		break;
	case '>':
		if (*(op+1) != '>')
			goto malformed;
		opcode = OP_SR;
		break;
	case '^':
		opcode = OP_EXOR;
		break;
	case '#':
		opcode = OP_POWER;
		break;
	}

	/* Split the work buffer in two at the operator. */
	element1 = &work[0];
	*op = NULLCHAR;

	if ((opcode == OP_SL) || (opcode == OP_SR)) {
		*(op+1) = NULLCHAR;
		element2 = op+2;
	} else
		element2 = op+1;

	if (strlen(clean_line(element1)) == 0)
		goto malformed;
	if (strlen(clean_line(element2)) == 0)
		goto malformed;

	/* Each operand may be a kernel symbol or a literal number. */
	if ((sp = symbol_search(element1)))
		value1 = ll_value1 = sp->value;
	else {
		if (!calculate(element1, &value1, &ll_value1,
		    flags & (HEX_BIAS|LONG_LONG)))
			goto malformed;
		if (BITS32() && (ll_value1 > 0xffffffff))
			np->retflags |= LONG_LONG;
	}

	if ((sp = symbol_search(element2)))
		value2 = ll_value2 = sp->value;
	else if (!calculate(element2, &value2, &ll_value2,
	    flags & (HEX_BIAS|LONG_LONG)))
		goto malformed;

	if (flags & LONG_LONG) {
		if (BITS32() && (ll_value2 > 0xffffffff))
			np->retflags |= LONG_LONG;

		switch (opcode)
		{
		case OP_ADD:
			np->ll_num = (ll_value1 + ll_value2);
			break;
		case OP_SUB:
			np->ll_num = (ll_value1 - ll_value2);
			break;
		case OP_AND:
			np->ll_num = (ll_value1 & ll_value2);
			break;
		case OP_OR:
			np->ll_num = (ll_value1 | ll_value2);
			break;
		case OP_MUL:
			np->ll_num = (ll_value1 * ll_value2);
			break;
		case OP_DIV:
			np->ll_num = (ll_value1 / ll_value2);
			break;
		case OP_MOD:
			np->ll_num = (ll_value1 % ll_value2);
			break;
		case OP_SL:
			np->ll_num = (ll_value1 << ll_value2);
			break;
		case OP_SR:
			np->ll_num = (ll_value1 >> ll_value2);
			break;
		case OP_EXOR:
			np->ll_num = (ll_value1 ^ ll_value2);
			break;
		case OP_POWER:
			np->ll_num = ll_power(ll_value1, ll_value2);
			break;
		}
	} else {
		switch (opcode)
		{
		case OP_ADD:
			np->num = (value1 + value2);
			break;
		case OP_SUB:
			np->num = (value1 - value2);
			break;
		case OP_AND:
			np->num = (value1 & value2);
			break;
		case OP_OR:
			np->num = (value1 | value2);
			break;
		case OP_MUL:
			np->num = (value1 * value2);
			break;
		case OP_DIV:
			np->num = (value1 / value2);
			break;
		case OP_MOD:
			np->num = (value1 % value2);
			break;
		case OP_SL:
			np->num = (value1 << value2);
			break;
		case OP_SR:
			np->num = (value1 >> value2);
			break;
		case OP_EXOR:
			np->num = (value1 ^ value2);
			break;
		case OP_POWER:
			np->num = power(value1, value2);
			break;
		}
	}

	return TRUE;

malformed:
	return FALSE;
}

/*
 * Take string containing a number, and possibly a multiplier, and calculate
 * its real value.  The allowable multipliers are k, K, m, M, g and G, for
 * kilobytes, megabytes and gigabytes.
 *
 * A leading '~' applies one's complement to the final result; a bare
 * symbol name resolves to its address.  Returns TRUE on success with the
 * result in *value or *llvalue (depending on LONG_LONG in flags).
 */
int
calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags)
{
	ulong factor, bias;
	int errflag;
	int ones_complement;
	ulong localval;
	ulonglong ll_localval;
	struct syment *sp;

	bias = flags & HEX_BIAS;

	if (*s == '~') {
		ones_complement = TRUE;
		s++;
	} else
		ones_complement = FALSE;

	if ((sp = symbol_search(s))) {
		if (flags & LONG_LONG) {
			*llvalue = (ulonglong)sp->value;
			if (ones_complement)
				*llvalue = ~(*llvalue);
		} else
			*value = ones_complement ? ~(sp->value) : sp->value;
		return TRUE;
	}

	factor = 1;
	errflag = 0;

	/* Strip a trailing K/M/G multiplier suffix, if any. */
	switch (LASTCHAR(s))
	{
	case 'k':
	case 'K':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = 1024;
		else
			return FALSE;
		break;

	case 'm':
	case 'M':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = (1024*1024);
		else
			return FALSE;
		break;

	case 'g':
	case 'G':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = (1024*1024*1024);
		else
			return FALSE;
		break;

	default:
		if (!IS_A_NUMBER(s))
			return FALSE;
		break;
	}

	if (flags & LONG_LONG) {
		ll_localval = stoll(s, RETURN_ON_ERROR|bias, &errflag);
		if (errflag)
			return FALSE;
		/* NOTE: complement is applied after the multiplier. */
		if (ones_complement)
			*llvalue = ~(ll_localval * factor);
		else
			*llvalue = ll_localval * factor;
	} else {
		localval = stol(s, RETURN_ON_ERROR|bias, &errflag);
		if (errflag)
			return FALSE;
		if (ones_complement)
			*value = ~(localval * factor);
		else
			*value = localval * factor;
	}

	return TRUE;
}

/*
 * Print a 32-bit or 64-bit number in hexadecimal, decimal, octal and binary,
 * also showing the bits set if appropriate.
 *
 * longlongflagforce forces 64-bit formatting; otherwise on 32-bit kernels
 * the value is printed in 64-bit format only if it doesn't fit in 32 bits.
 */
static void
print_number(struct number_option *np, int bitflag, int longlongflagforce)
{
	int i;
	ulong hibit;
	ulonglong ll_hibit;
	int ccnt;
	ulong mask;
	ulonglong ll_mask;
	char *hdr = " bits set: ";
	char buf[BUFSIZE];
	int hdrlen;
	int longlongformat;

	longlongformat = longlongflagforce;

	if (!longlongflagforce) {
		if (BITS32()) {
			if (np->retflags & LONG_LONG)
				longlongformat = TRUE;
			if (np->ll_num > 0xffffffff)
				longlongformat = TRUE;
			else
				np->num = (ulong)np->ll_num;
		}
	}

	if (longlongformat) {
		ll_hibit = (ulonglong)(1) << ((sizeof(long long)*8)-1);

		fprintf(fp, "hexadecimal: %llx ", np->ll_num);
		/* Annotate exact GB/MB/KB multiples for readability. */
		if (np->ll_num >= KILOBYTES(1)) {
			if ((np->ll_num % GIGABYTES(1)) == 0)
				fprintf(fp, "(%lldGB)",
					np->ll_num / GIGABYTES(1));
			else if ((np->ll_num % MEGABYTES(1)) == 0)
				fprintf(fp, "(%lldMB)",
					np->ll_num / MEGABYTES(1));
			else if ((np->ll_num % KILOBYTES(1)) == 0)
				fprintf(fp, "(%lldKB)",
					np->ll_num / KILOBYTES(1));
		}
		fprintf(fp, "\n");
		fprintf(fp, " decimal: %llu ", np->ll_num);
		if ((long long)np->ll_num < 0)
			fprintf(fp, "(%lld)\n", (long long)np->ll_num);
		else
			fprintf(fp, "\n");
		fprintf(fp, " octal: %llo\n", np->ll_num);
		fprintf(fp, " binary: ");
		for (i = 0, ll_mask = np->ll_num; i < (sizeof(long long)*8);
		     i++, ll_mask <<= 1)
			if (ll_mask & ll_hibit)
				fprintf(fp, "1");
			else
				fprintf(fp, "0");
		fprintf(fp,"\n");
	} else {
		hibit = (ulong)(1) << ((sizeof(long)*8)-1);

		fprintf(fp, "hexadecimal: %lx ", np->num);
		if (np->num >= KILOBYTES(1)) {
			if ((np->num % GIGABYTES(1)) == 0)
				fprintf(fp, "(%ldGB)",
					np->num / GIGABYTES(1));
			else if ((np->num % MEGABYTES(1)) == 0)
				fprintf(fp, "(%ldMB)",
					np->num / MEGABYTES(1));
			else if ((np->num % KILOBYTES(1)) == 0)
				fprintf(fp, "(%ldKB)",
					np->num / KILOBYTES(1));
		}
		fprintf(fp, "\n");
		fprintf(fp, " decimal: %lu ", np->num);
		if ((long)np->num < 0)
			fprintf(fp, "(%ld)\n", (long)np->num);
		else
			fprintf(fp, "\n");
		fprintf(fp, " octal: %lo\n", np->num);
		fprintf(fp, " binary: ");
		for (i = 0, mask = np->num; i < (sizeof(long)*8);
		     i++, mask <<= 1)
			if (mask & hibit)
				fprintf(fp, "1");
			else
				fprintf(fp, "0");
		fprintf(fp,"\n");
	}

	if (!bitflag)
		return;

	/*
	 * Enumerate the set bit positions, high to low, wrapping the
	 * output at column 77 and indenting continuation lines.
	 */
	hdrlen = strlen(hdr);
	ccnt = hdrlen;
	fprintf(fp, "%s", hdr);

	if (longlongformat) {
		for (i = 63; i >= 0; i--) {
			ll_mask = (ulonglong)(1) << i;
			if (np->ll_num & ll_mask) {
				sprintf(buf, "%d ", i);
				fprintf(fp, "%s", buf);
				ccnt += strlen(buf);
				if (ccnt >= 77) {
					fprintf(fp, "\n");
					INDENT(strlen(hdr));
					ccnt = hdrlen;
				}
			}
		}
	} else {
		for (i = BITS()-1; i >= 0; i--) {
			mask = (ulong)(1) << i;
			if (np->num & mask) {
				sprintf(buf, "%d ", i);
				fprintf(fp, "%s", buf);
				ccnt += strlen(buf);
				if (ccnt >= 77) {
					fprintf(fp, "\n");
					INDENT(strlen(hdr));
					ccnt = hdrlen;
				}
			}
		}
	}

	fprintf(fp, "\n");
}

/*
 * Display the contents of a linked list.  Minimum requirements are a starting
 * address, typically of a structure which contains the "next" list entry at
 * some offset into the structure.  The default offset is zero bytes, and need
Otherwise a number argument that's not * a kernel * virtual address will be understood to be the offset. * Alternatively the offset may be entered in "struct.member" format. Each * item in the list is dumped, and the list will be considered terminated upon * encountering a "next" value that is: * * a NULL pointer. * a pointer to the starting address. * a pointer to the entry pointed to by the starting address. * a pointer to the structure itself. * a pointer to the value specified with the "-e ending_addr" option. * * If the structures are linked using list_head structures, the -h or -H * options must be used. In that case, the "start" address is: * a pointer to the structure that contains the list_head structure (-h), * or a pointer to a LIST_HEAD() structure (-H). * * Given that the contents of the structures containing the next pointers * often contain useful data, the "-s structname" also prints each structure * in the list. * * By default, the list members are hashed to guard against duplicate entries * causing the list to wrap back upon itself. * * WARNING: There's an inordinate amount of work parsing arguments below * in order to maintain backwards compatibility re: not having to use -o, * which gets sticky with zero-based kernel virtual address space. 
*/ void cmd_list(void) { int c; struct list_data list_data, *ld; struct datatype_member struct_member, *sm; struct syment *sp; ulong value, struct_list_offset; sm = &struct_member; ld = &list_data; BZERO(ld, sizeof(struct list_data)); struct_list_offset = 0; while ((c = getopt(argcnt, args, "Hhrs:e:o:xdl:")) != EOF) { switch(c) { case 'H': ld->flags |= LIST_HEAD_FORMAT; ld->flags |= LIST_HEAD_POINTER; break; case 'h': ld->flags |= LIST_HEAD_FORMAT; break; case 'r': ld->flags |= LIST_HEAD_REVERSE; break; case 's': if (ld->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); break; case 'l': if (IS_A_NUMBER(optarg)) struct_list_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) struct_list_offset = sm->member_offset; else error(FATAL, "invalid -l option: %s\n", optarg); break; case 'o': if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %d (0x%lx) already entered\n", ld->member_offset, ld->member_offset); else if (IS_A_NUMBER(optarg)) ld->member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) ld->member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); ld->flags |= LIST_OFFSET_ENTERED; break; case 'e': ld->end = htol(optarg, FAULT_ON_ERROR, NULL); break; case 'x': if (ld->flags & LIST_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_16; break; case 'd': if (ld->flags & LIST_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] && args[optind+1] && args[optind+2]) { error(INFO, "too many arguments\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->structname_args) { ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); retrieve_list((ulong *)ld->structname, ld->structname_args); hq_close(); 
ld->struct_list_offset = struct_list_offset; } else if (struct_list_offset) { error(INFO, "-l option can only be used with -s option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } while (args[optind]) { if (strstr(args[optind], ".") && arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %ld (0x%lx) already entered\n", ld->member_offset, ld->member_offset); ld->member_offset = sm->member_offset; ld->flags |= LIST_OFFSET_ENTERED; } else { /* * Do an inordinate amount of work to avoid -o... * * OK, if it's a symbol, then it has to be a start. */ if ((sp = symbol_search(args[optind]))) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = sp->value; ld->flags |= LIST_START_ENTERED; goto next_arg; } /* * If it's not a symbol nor a number, bail out if it * cannot be evaluated as a start address. */ if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } error(FATAL, "invalid argument: %s\n", args[optind]); } /* * If the start is known, it's got to be an offset. */ if (ld->flags & LIST_START_ENTERED) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; break; } /* * If the offset is known, or there's no subsequent * argument, then it's got to be a start. */ if ((ld->flags & LIST_OFFSET_ENTERED) || !args[optind+1]) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (!IS_KVADDR(value)) error(FATAL, "invalid kernel virtual address: %s\n", args[optind]); ld->start = value; ld->flags |= LIST_START_ENTERED; break; } /* * Neither start nor offset has been entered, and * it's a number. Look ahead to the next argument. * If it's a symbol, then this must be an offset. 
*/ if ((sp = symbol_search(args[optind+1]))) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; goto next_arg; } else if ((!IS_A_NUMBER(args[optind+1]) && !can_eval(args[optind+1])) && !strstr(args[optind+1], ".")) error(FATAL, "symbol not found: %s\n", args[optind+1]); /* * Crunch time. We've got two numbers. If they're * both ambigous we must have zero-based kernel * virtual address space. */ if (COMMON_VADDR_SPACE() && AMBIGUOUS_NUMBER(args[optind]) && AMBIGUOUS_NUMBER(args[optind+1])) { error(INFO, "ambiguous arguments: \"%s\" and \"%s\": -o is required\n", args[optind], args[optind+1]); cmd_usage(pc->curcmd, SYNOPSIS); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; } next_arg: optind++; } if (!(ld->flags & LIST_START_ENTERED)) { error(INFO, "starting address required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((ld->flags & LIST_OFFSET_ENTERED) && ld->struct_list_offset) { error(INFO, "-l and -o are mutually exclusive\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->flags & LIST_HEAD_FORMAT) { ld->list_head_offset = ld->member_offset; if (ld->flags & LIST_HEAD_REVERSE) ld->member_offset = sizeof(void *); else ld->member_offset = 0; if (ld->flags & LIST_HEAD_POINTER) { if (!ld->end) ld->end = ld->start; readmem(ld->start + ld->member_offset, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (ld->start == ld->end) { fprintf(fp, "(empty)\n"); return; } } else ld->start += ld->list_head_offset; } ld->flags &= ~(LIST_OFFSET_ENTERED|LIST_START_ENTERED); ld->flags |= VERBOSE; hq_open(); c = do_list(ld); hq_close(); if (ld->structname_args) FREEBUF(ld->structname); } /* * Does the work for cmd_list() and any other function that 
requires the * contents of a linked list. See cmd_list description above for details. */ int do_list(struct list_data *ld) { ulong next, last, first; ulong searchfor, readflag; int i, count, others, close_hq_on_return; unsigned int radix; if (CRASHDEBUG(1)) { others = 0; console(" flags: %lx (", ld->flags); if (ld->flags & VERBOSE) console("%sVERBOSE", others++ ? "|" : ""); if (ld->flags & LIST_OFFSET_ENTERED) console("%sLIST_OFFSET_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_START_ENTERED) console("%sLIST_START_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_FORMAT) console("%sLIST_HEAD_FORMAT", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_POINTER) console("%sLIST_HEAD_POINTER", others++ ? "|" : ""); if (ld->flags & RETURN_ON_DUPLICATE) console("%sRETURN_ON_DUPLICATE", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_10) console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_16) console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : ""); if (ld->flags & LIST_ALLOCATE) console("%sLIST_ALLOCATE", others++ ? "|" : ""); if (ld->flags & LIST_CALLBACK) console("%sLIST_CALLBACK", others++ ? "|" : ""); if (ld->flags & CALLBACK_RETURN) console("%sCALLBACK_RETURN", others++ ? 
"|" : ""); console(")\n"); console(" start: %lx\n", ld->start); console(" member_offset: %ld\n", ld->member_offset); console(" list_head_offset: %ld\n", ld->list_head_offset); console(" end: %lx\n", ld->end); console(" searchfor: %lx\n", ld->searchfor); console(" structname_args: %lx\n", ld->structname_args); if (!ld->structname_args) console(" structname: (unused)\n"); for (i = 0; i < ld->structname_args; i++) console(" structname[%d]: %s\n", i, ld->structname[i]); console(" header: %s\n", ld->header); console(" list_ptr: %lx\n", (ulong)ld->list_ptr); console(" callback_func: %lx\n", (ulong)ld->callback_func); console(" callback_data: %lx\n", (ulong)ld->callback_data); console("struct_list_offset: %lx\n", ld->struct_list_offset); } count = 0; searchfor = ld->searchfor; ld->searchfor = 0; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; next = ld->start; close_hq_on_return = FALSE; if (ld->flags & LIST_ALLOCATE) { if (!hq_is_open()) { hq_open(); close_hq_on_return = TRUE; } else if (hq_is_inuse()) { error(ld->flags & RETURN_ON_LIST_ERROR ? INFO : FATAL, "\ndo_list: hash queue is in use?\n"); return -1; } } readflag = ld->flags & RETURN_ON_LIST_ERROR ? 
(RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); while (1) { if (ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - ld->list_head_offset - ld->struct_list_offset, radix); break; default: dump_struct_members(ld, i, next); break; } } } } if (next && !hq_enter(next - ld->list_head_offset)) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", next); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; if (!readmem(next + ld->member_offset, KVADDR, &next, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (next == 0) { if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) 
console("do_list count: %d\n", count); if (ld->flags & LIST_ALLOCATE) { ld->list_ptr = (ulong *)GETBUF(count * sizeof(void *)); count = retrieve_list(ld->list_ptr, count); if (close_hq_on_return) hq_close(); } return count; } /* * Issue a dump_struct_member() call for one or more structure * members. Multiple members are passed in a comma-separated * list using the the format: * * struct.member1,member2,member3 */ void dump_struct_members(struct list_data *ld, int idx, ulong next) { int i, argc; char *p1, *p2; char *structname, *members; char *arglist[MAXARGS]; unsigned int radix; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; structname = GETBUF(strlen(ld->structname[idx])+1); members = GETBUF(strlen(ld->structname[idx])+1); strcpy(structname, ld->structname[idx]); p1 = strstr(structname, ".") + 1; p2 = strstr(ld->structname[idx], ".") + 1; strcpy(members, p2); replace_string(members, ",", ' '); argc = parse_line(members, arglist); for (i = 0; i < argc; i++) { *p1 = NULLCHAR; strcat(structname, arglist[i]); dump_struct_member(structname, next - ld->list_head_offset - ld->struct_list_offset, radix); } FREEBUF(structname); FREEBUF(members); } #define RADIXTREE_REQUEST (0x1) #define RBTREE_REQUEST (0x2) void cmd_tree() { int c, type_flag, others; long root_offset; struct tree_data tree_data, *td; struct datatype_member struct_member, *sm; struct syment *sp; ulong value; type_flag = 0; root_offset = 0; sm = &struct_member; td = &tree_data; BZERO(td, sizeof(struct tree_data)); while ((c = getopt(argcnt, args, "xdt:r:o:s:pN")) != EOF) { switch (c) { case 't': if (type_flag & (RADIXTREE_REQUEST|RBTREE_REQUEST)) { error(INFO, "multiple tree types may not be entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (STRNEQ(optarg, "ra")) type_flag = RADIXTREE_REQUEST; else if (STRNEQ(optarg, "rb")) type_flag = RBTREE_REQUEST; else { error(INFO, "invalid tree type: %s\n", optarg); cmd_usage(pc->curcmd, 
SYNOPSIS); } break; case 'r': if (td->flags & TREE_ROOT_OFFSET_ENTERED) error(FATAL, "root offset value %d (0x%lx) already entered\n", root_offset, root_offset); else if (IS_A_NUMBER(optarg)) root_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) root_offset = sm->member_offset; else error(FATAL, "invalid -r argument: %s\n", optarg); td->flags |= TREE_ROOT_OFFSET_ENTERED; break; case 'o': if (td->flags & TREE_NODE_OFFSET_ENTERED) error(FATAL, "node offset value %d (0x%lx) already entered\n", td->node_member_offset, td->node_member_offset); else if (IS_A_NUMBER(optarg)) td->node_member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) td->node_member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); td->flags |= TREE_NODE_OFFSET_ENTERED; break; case 's': if (td->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); break; case 'p': td->flags |= TREE_POSITION_DISPLAY; break; case 'N': td->flags |= TREE_NODE_POINTER; break; case 'x': if (td->flags & TREE_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_16; break; case 'd': if (td->flags & TREE_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((type_flag & RADIXTREE_REQUEST) && (td->flags & TREE_NODE_OFFSET_ENTERED)) error(FATAL, "-o option is not applicable to radix trees\n"); if ((td->flags & TREE_ROOT_OFFSET_ENTERED) && (td->flags & TREE_NODE_POINTER)) error(INFO, "-r and -N options are mutually exclusive\n"); if (!args[optind]) { error(INFO, "a starting address is required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((sp = symbol_search(args[optind]))) { td->start = sp->value; goto next_arg; } if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = 
eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); next_arg: if (args[optind+1]) { error(INFO, "too many arguments entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (td->structname_args) { td->structname = (char **)GETBUF(sizeof(char *) * td->structname_args); retrieve_list((ulong *)td->structname, td->structname_args); hq_close(); } if (!(td->flags & TREE_NODE_POINTER)) td->start = td->start + root_offset; if (CRASHDEBUG(1)) { others = 0; fprintf(fp, " flags: %lx (", td->flags); if (td->flags & TREE_ROOT_OFFSET_ENTERED) fprintf(fp, "%sTREE_ROOT_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_OFFSET_ENTERED) fprintf(fp, "%sTREE_NODE_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_POINTER) fprintf(fp, "%sTREE_NODE_POINTER", others++ ? "|" : ""); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, "%sTREE_POSITION_DISPLAY", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_10) fprintf(fp, "%sTREE_STRUCT_RADIX_10", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_16) fprintf(fp, "%sTREE_STRUCT_RADIX_16", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " type: %s\n", type_flag & RADIXTREE_REQUEST ? "radix" : "red-black"); fprintf(fp, " node pointer: %s\n", td->flags & TREE_NODE_POINTER ? 
"yes" : "no"); fprintf(fp, " start: %lx\n", td->start); fprintf(fp, "node_member_offset: %ld\n", td->node_member_offset); fprintf(fp, " structname_args: %d\n", td->structname_args); fprintf(fp, " count: %d\n", td->count); } td->flags &= ~TREE_NODE_OFFSET_ENTERED; td->flags |= VERBOSE; hq_open(); if (type_flag & RADIXTREE_REQUEST) do_rdtree(td); else do_rbtree(td); hq_close(); if (td->structname_args) FREEBUF(td->structname); } static ulong RADIX_TREE_MAP_SHIFT = UNINITIALIZED; static ulong RADIX_TREE_MAP_SIZE = UNINITIALIZED; static ulong RADIX_TREE_MAP_MASK = UNINITIALIZED; int do_rdtree(struct tree_data *td) { long nlen; ulong node_p; uint print_radix, height; char pos[BUFSIZE]; if (!VALID_STRUCT(radix_tree_root) || !VALID_STRUCT(radix_tree_node) || !VALID_MEMBER(radix_tree_root_height) || !VALID_MEMBER(radix_tree_root_rnode) || !VALID_MEMBER(radix_tree_node_slots) || !ARRAY_LENGTH(height_to_maxindex)) error(FATAL, "radix trees do not exist or have changed " "their format\n"); if (RADIX_TREE_MAP_SHIFT == UNINITIALIZED) { if (!(nlen = MEMBER_SIZE("radix_tree_node", "slots"))) error(FATAL, "cannot determine length of " "radix_tree_node.slots[] array\n"); nlen /= sizeof(void *); RADIX_TREE_MAP_SHIFT = ffsl(nlen) - 1; RADIX_TREE_MAP_SIZE = (1UL << RADIX_TREE_MAP_SHIFT); RADIX_TREE_MAP_MASK = (RADIX_TREE_MAP_SIZE-1); } if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; if (td->flags & TREE_NODE_POINTER) { node_p = td->start; if (node_p & 1) node_p &= ~1; if (VALID_MEMBER(radix_tree_node_height)) { readmem(node_p + OFFSET(radix_tree_node_height), KVADDR, &height, sizeof(uint), "radix_tree_node height", FAULT_ON_ERROR); if (height > ARRAY_LENGTH(height_to_maxindex)) { fprintf(fp, "radix_tree_node at %lx\n", node_p); dump_struct("radix_tree_node", node_p, print_radix); error(FATAL, "height %d is greater than " "height_to_maxindex[] index %ld\n", height, 
ARRAY_LENGTH(height_to_maxindex)); } } else error(FATAL, "-N option is not supported or applicable" " for radix trees on this architecture or kernel\n"); } else { readmem(td->start + OFFSET(radix_tree_root_height), KVADDR, &height, sizeof(uint), "radix_tree_root height", FAULT_ON_ERROR); if (height > ARRAY_LENGTH(height_to_maxindex)) { fprintf(fp, "radix_tree_root at %lx\n", td->start); dump_struct("radix_tree_root", (ulong)td->start, print_radix); error(FATAL, "height %d is greater than " "height_to_maxindex[] index %ld\n", height, ARRAY_LENGTH(height_to_maxindex)); } readmem(td->start + OFFSET(radix_tree_root_rnode), KVADDR, &node_p, sizeof(void *), "radix_tree_root rnode", FAULT_ON_ERROR); } if (node_p & 1) node_p &= ~1; sprintf(pos, "root"); rdtree_iteration(node_p, td, pos, -1, height); return td->count; } void rdtree_iteration(ulong node_p, struct tree_data *td, char *ppos, ulong indexnum, uint height) { ulong slot; int i, index; uint print_radix; char pos[BUFSIZE]; if (indexnum != -1) sprintf(pos, "%s/%ld", ppos, indexnum); else sprintf(pos, "%s", ppos); for (index = 0; index < RADIX_TREE_MAP_SIZE; index++) { readmem((ulong)node_p + OFFSET(radix_tree_node_slots) + sizeof(void *) * index, KVADDR, &slot, sizeof(void *), "radix_tree_node.slot[index]", FAULT_ON_ERROR); if (!slot) continue; if (height == 1) { if (hq_enter(slot)) td->count++; else error(FATAL, "\nduplicate tree entry: %lx\n", node_p); if (td->flags & VERBOSE) fprintf(fp, "%lx\n",slot); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, " position: %s/%d\n", pos, index); if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch(count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], slot, print_radix); break; default: dump_struct_members_for_tree(td, i, slot); break; } } } } else rdtree_iteration(slot, td, pos, index, 
height-1); } } int do_rbtree(struct tree_data *td) { ulong start; char pos[BUFSIZE]; if (!VALID_MEMBER(rb_root_rb_node) || !VALID_MEMBER(rb_node_rb_left) || !VALID_MEMBER(rb_node_rb_right)) error(FATAL, "red-black trees do not exist or have changed " "their format\n"); sprintf(pos, "root"); if (td->flags & TREE_NODE_POINTER) start = td->start; else readmem(td->start + OFFSET(rb_root_rb_node), KVADDR, &start, sizeof(void *), "rb_root rb_node", FAULT_ON_ERROR); rbtree_iteration(start, td, pos); return td->count; } void rbtree_iteration(ulong node_p, struct tree_data *td, char *pos) { int i; uint print_radix; ulong struct_p, left_p, right_p; char left_pos[BUFSIZE], right_pos[BUFSIZE]; if (!node_p) return; if (hq_enter(node_p)) td->count++; else error(FATAL, "\nduplicate tree entry: %lx\n", node_p); struct_p = node_p - td->node_member_offset; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", struct_p); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, " position: %s\n", pos); if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch(count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], struct_p, print_radix); break; default: dump_struct_members_for_tree(td, i, struct_p); break; } } } readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &left_p, sizeof(void *), "rb_node rb_left", FAULT_ON_ERROR); readmem(node_p+OFFSET(rb_node_rb_right), KVADDR, &right_p, sizeof(void *), "rb_node rb_right", FAULT_ON_ERROR); sprintf(left_pos, "%s/l", pos); sprintf(right_pos, "%s/r", pos); rbtree_iteration(left_p, td, left_pos); rbtree_iteration(right_p, td, right_pos); } void dump_struct_members_for_tree(struct tree_data *td, int idx, ulong struct_p) { int i, argc; uint print_radix; char *p1; char *structname, *members; char *arglist[MAXARGS]; if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & 
TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; structname = GETBUF(strlen(td->structname[idx])+1); members = GETBUF(strlen(td->structname[idx])+1); strcpy(structname, td->structname[idx]); p1 = strstr(structname, ".") + 1; strcpy(members, p1); replace_string(members, ",", ' '); argc = parse_line(members, arglist); for (i = 0; i pageshift) #define HQ_INDEX(X) (((X) >> HQ_SHIFT) % pc->nr_hash_queues) struct hq_entry { int next; int order; ulong value; }; struct hq_head { int next; int qcnt; }; struct hash_table { ulong flags; struct hq_head *queue_heads; struct hq_entry *memptr; long count; long index; int reallocs; } hash_table = { 0 }; /* * For starters, allocate a hash table containing HQ_ENTRY_CHUNK entries. * If necessary during runtime, it will be increased in size. */ void hq_init(void) { struct hash_table *ht; ht = &hash_table; if (pc->nr_hash_queues == 0) pc->nr_hash_queues = NR_HASH_QUEUES_DEFAULT; if ((ht->queue_heads = (struct hq_head *)malloc(pc->nr_hash_queues * sizeof(struct hq_head))) == NULL) { error(INFO, "cannot malloc memory for hash queue heads: %s\n", strerror(errno)); ht->flags = HASH_QUEUE_NONE; pc->flags &= ~HASH; return; } if ((ht->memptr = (struct hq_entry *)malloc(HQ_ENTRY_CHUNK * sizeof(struct hq_entry))) == NULL) { error(INFO, "cannot malloc memory for hash queues: %s\n", strerror(errno)); ht->flags = HASH_QUEUE_NONE; pc->flags &= ~HASH; return; } BZERO(ht->memptr, HQ_ENTRY_CHUNK * sizeof(struct hq_entry)); ht->count = HQ_ENTRY_CHUNK; ht->index = 0; } /* * Get a free hash queue entry. If there's no more available, realloc() * a new chunk of memory with another HQ_ENTRY_CHUNK entries stuck on the end. 
*/
/*
 *  Hand out the next free hq_entry slot, growing the flat array by another
 *  HQ_ENTRY_CHUNK entries via realloc() when exhausted.  Returns the
 *  1-based slot index, or -1 on realloc() failure (table is then marked
 *  HASH_QUEUE_FULL).
 */
static long
alloc_hq_entry(void)
{
	struct hash_table *ht;
	struct hq_entry *new, *end_of_old;

	ht = &hash_table;

	if (++ht->index == ht->count) {
		/* array exhausted: append another chunk */
		if (!(new = (void *)realloc((void *)ht->memptr,
		    (ht->count+HQ_ENTRY_CHUNK) * sizeof(struct hq_entry)))) {
			error(INFO,
			    "cannot realloc memory for hash queues: %s\n",
				strerror(errno));
			ht->flags |= HASH_QUEUE_FULL;
			return(-1);
		}
		ht->reallocs++;

		ht->memptr = new;
		/* zero only the newly-appended chunk */
		end_of_old = ht->memptr + ht->count;
		BZERO(end_of_old, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));
		ht->count += HQ_ENTRY_CHUNK;
	}

	return(ht->index);
}

/*
 * Restore the hash queue to its state before the duplicate entry
 * was attempted.
 */
static void
dealloc_hq_entry(struct hq_entry *entry)
{
	struct hash_table *ht;
	long hqi;

	ht = &hash_table;
	hqi = HQ_INDEX(entry->value);

	/* give back the just-allocated slot and clear it */
	ht->index--;

	BZERO(entry, sizeof(struct hq_entry));

	ht->queue_heads[hqi].qcnt--;
}

/*
 * Initialize the hash table for a hashing session.
 * Returns TRUE if the table is usable, FALSE if hashing is off, broken,
 * or a session is already open.
 */
int
hq_open(void)
{
	struct hash_table *ht;

	if (!(pc->flags & HASH))
		return FALSE;

	ht = &hash_table;
	if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_OPEN))
		return FALSE;

	ht->flags &= ~(HASH_QUEUE_FULL|HASH_QUEUE_CLOSED);

	/* clear all queue heads and previously-used entry slots */
	BZERO(ht->queue_heads, sizeof(struct hq_head) * pc->nr_hash_queues);
	BZERO(ht->memptr, ht->count * sizeof(struct hq_entry));
	ht->index = 0;

	ht->flags |= HASH_QUEUE_OPEN;

	return TRUE;
}

/* Is a hashing session currently open? */
int
hq_is_open(void)
{
	struct hash_table *ht;

	ht = &hash_table;
	return (ht->flags & HASH_QUEUE_OPEN ? TRUE : FALSE);
}

/* Has the open session accumulated any entries? */
int
hq_is_inuse(void)
{
	struct hash_table *ht;

	if (!hq_is_open())
		return FALSE;

	ht = &hash_table;
	return (ht->index ? TRUE : FALSE);
}

/*
 * Close the hash table, returning the number of items hashed in this session.
*/
int
hq_close(void)
{
	struct hash_table *ht;

	ht = &hash_table;

	ht->flags &= ~(HASH_QUEUE_OPEN);
	ht->flags |= HASH_QUEUE_CLOSED;

	if (!(pc->flags & HASH))
		return(0);

	if (ht->flags & HASH_QUEUE_NONE)
		return(0);

	/* a FULL table can be reused next session after the flag is cleared */
	ht->flags &= ~HASH_QUEUE_FULL;

	return(ht->index);
}

/* shared format string for all corrupt-entry diagnostics */
char *corrupt_hq = "corrupt hash queue entry: value: %lx next: %d order: %d\n";

/*
 * For a given value, allocate a hash queue entry and hash it into the
 * open hash table.  If a duplicate entry is found, return FALSE; for all
 * other possibilities return TRUE.  Note that it's up to the user to deal
 * with failure.
 */
int
hq_enter(ulong value)
{
	struct hash_table *ht;
	struct hq_entry *entry;
	struct hq_entry *list_entry;
	long hqi;
	long index;

	/* with hashing disabled/broken/full, pretend success */
	if (!(pc->flags & HASH))
		return TRUE;

	ht = &hash_table;

	if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_FULL))
		return TRUE;

	if (!(ht->flags & HASH_QUEUE_OPEN))
		return TRUE;

	if ((index = alloc_hq_entry()) < 0)
		return TRUE;

	entry = ht->memptr + index;
	if (entry->next || entry->value || entry->order) {
		/* a fresh slot must be all zeroes */
		error(INFO, corrupt_hq, entry->value, entry->next,
			entry->order);
		ht->flags |= HASH_QUEUE_NONE;
		return TRUE;
	}

	entry->next = 0;
	entry->value = value;
	entry->order = index;	/* 1-based insertion order */

	hqi = HQ_INDEX(value);

	if (ht->queue_heads[hqi].next == 0) {
		/* first entry on this queue */
		ht->queue_heads[hqi].next = index;
		ht->queue_heads[hqi].qcnt = 1;
		return TRUE;
	} else
		ht->queue_heads[hqi].qcnt++;

	/* scan the queue for a duplicate, then append at the tail */
	list_entry = ht->memptr + ht->queue_heads[hqi].next;

	while (TRUE) {
		if (list_entry->value == entry->value) {
			dealloc_hq_entry(entry);
			return FALSE;
		}
		if (list_entry->next >= ht->count) {
			error(INFO, corrupt_hq, list_entry->value,
				list_entry->next, list_entry->order);
			ht->flags |= HASH_QUEUE_NONE;
			return TRUE;
		}
		if (list_entry->next == 0)
			break;
		list_entry = ht->memptr + list_entry->next;
	}

	list_entry->next = index;

	return TRUE;
}

/*
 * "hash -d" output
 */
void
dump_hash_table(int verbose)
{
	int i;
	struct hash_table *ht;
	struct hq_entry *list_entry;
	long elements;
	long queues_in_use;
	int others;
	uint minq, maxq;

	ht = &hash_table;
	others = 0;

	fprintf(fp, " flags: %lx (", ht->flags);
	if (ht->flags & HASH_QUEUE_NONE)
		fprintf(fp, "%sHASH_QUEUE_NONE", others++ ? "|" : "");
	if (ht->flags & HASH_QUEUE_OPEN)
		fprintf(fp, "%sHASH_QUEUE_OPEN", others++ ? "|" : "");
	if (ht->flags & HASH_QUEUE_CLOSED)
		fprintf(fp, "%sHASH_QUEUE_CLOSED", others++ ? "|" : "");
	if (ht->flags & HASH_QUEUE_FULL)
		fprintf(fp, "%sHASH_QUEUE_FULL", others++ ? "|" : "");
	fprintf(fp, ")\n");

	fprintf(fp, " queue_heads[%ld]: %lx\n", pc->nr_hash_queues,
		(ulong)ht->queue_heads);
	fprintf(fp, " memptr: %lx\n", (ulong)ht->memptr);
	fprintf(fp, " count: %ld ", ht->count);
	if (ht->reallocs)
		fprintf(fp, " (%d reallocs)", ht->reallocs);
	fprintf(fp, "\n");
	fprintf(fp, " index: %ld\n", ht->index);

	/* gather per-queue usage statistics */
	queues_in_use = 0;
	minq = ~(0);
	maxq = 0;
	for (i = 0; i < pc->nr_hash_queues; i++) {
		if (ht->queue_heads[i].next == 0) {
			minq = 0;
			continue;
		}
		if (ht->queue_heads[i].qcnt < minq)
			minq = ht->queue_heads[i].qcnt;
		if (ht->queue_heads[i].qcnt > maxq)
			maxq = ht->queue_heads[i].qcnt;
		queues_in_use++;
	}

	/* count valid entries, sanity-checking each slot */
	elements = 0;
	list_entry = ht->memptr;
	for (i = 0; i < ht->count; i++, list_entry++) {
		if (!list_entry->order) {
			if (list_entry->value || list_entry->next)
				goto corrupt_list_entry;
			continue;
		}
		if (list_entry->next >= ht->count)
			goto corrupt_list_entry;
		++elements;
	}

	if (elements != ht->index)
		fprintf(fp, " elements found: %ld (expected %ld)\n",
			elements, ht->index);
	fprintf(fp, " queues in use: %ld of %ld\n", queues_in_use,
		pc->nr_hash_queues);
	fprintf(fp, " queue length range: %d to %d\n", minq, maxq);

	if (verbose) {
		if (!elements) {
			fprintf(fp, " entries: (none)\n");
			return;
		}
		fprintf(fp, " entries: ");
		list_entry = ht->memptr;
		for (i = 0; i < ht->count; i++, list_entry++) {
			if (list_entry->order)
				fprintf(fp, "%s%lx (%d)\n",
					list_entry->order == 1 ?
					"" : " ",
					list_entry->value, list_entry->order);
		}
	}
	return;

corrupt_list_entry:

	error(INFO, corrupt_hq, list_entry->value, list_entry->next,
		list_entry->order);
	ht->flags |= HASH_QUEUE_NONE;
}

/*
 * Retrieve the count of, and optionally stuff a pre-allocated array with,
 * the current hash table entries.  The entries will be sorted according
 * to the order in which they were entered, so from this point on, no
 * further hq_enter() operations on this list will be allowed.  However,
 * multiple calls to retrieve_list are allowed because the second and
 * subsequent ones will go directly to where the non-zero (valid) entries
 * start in the potentially very large list_entry memory chunk.
 */
int
retrieve_list(ulong array[], int count)
{
	int i;
	struct hash_table *ht;
	struct hq_entry *list_entry;
	int elements;

	if (!(pc->flags & HASH))
		error(FATAL,
		    "cannot perform this command with hash turned off\n");

	ht = &hash_table;
	list_entry = ht->memptr;

	/* slot order == insertion order; unused slots must be all-zero */
	for (i = elements = 0; i < ht->count; i++, list_entry++) {
		if (!list_entry->order) {
			if (list_entry->value || list_entry->next)
				goto corrupt_list_entry;
			continue;
		}
		if (list_entry->next >= ht->count)
			goto corrupt_list_entry;
		if (array)
			array[elements] = list_entry->value;
		if (++elements == count)
			break;
	}

	return elements;

corrupt_list_entry:

	error(INFO, corrupt_hq, list_entry->value, list_entry->next,
		list_entry->order);
	ht->flags |= HASH_QUEUE_NONE;
	return(-1);
}

/*
 * For a given value, check to see if a hash queue entry exists.  If an
 * entry is found, return TRUE; for all other possibilities return FALSE.
*/
int
hq_entry_exists(ulong value)
{
	struct hash_table *ht = &hash_table;
	struct hq_entry *e;

	/* hashing must be enabled, intact, and in an open session */
	if (!(pc->flags & HASH))
		return FALSE;
	if (ht->flags & (HASH_QUEUE_NONE))
		return FALSE;
	if (!(ht->flags & HASH_QUEUE_OPEN))
		return FALSE;

	/* walk the bucket's chain looking for the value */
	e = ht->memptr + ht->queue_heads[HQ_INDEX(value)].next;
	for (;;) {
		if (e->value == value)
			return TRUE;
		if (e->next >= ht->count) {
			/* link points past the array: table is corrupt */
			error(INFO, corrupt_hq, e->value, e->next, e->order);
			ht->flags |= HASH_QUEUE_NONE;
			return FALSE;
		}
		if (e->next == 0)
			break;
		e = ht->memptr + e->next;
	}

	return FALSE;
}

/*
 * K&R power function for integers
 */
long
power(long base, int exp)
{
	long result = 1;
	int n;

	for (n = 0; n < exp; n++)
		result *= base;

	return result;
}

long long
ll_power(long long base, long long exp)
{
	long long result = 1;
	long long n;

	for (n = 0; n < exp; n++)
		result *= base;

	return result;
}

/*
 * Internal buffer allocation scheme to avoid inline malloc() calls and
 * resultant memory leaks due to aborted commands.  These buffers are
 * for TEMPORARY use on a per-command basis.  They are allocated by calls
 * to GETBUF(size).
They can be explicitly freed by FREEBUF(address), but
 * they are all freed by free_all_bufs(), which is called in a number of
 * places -- most notably when each command completes.
 */

/* number of statically-allocated buffers per size class */
#define NUMBER_1K_BUFS (10)
#define NUMBER_2K_BUFS (10)
#define NUMBER_4K_BUFS (5)
#define NUMBER_8K_BUFS (5)
#define NUMBER_32K_BUFS (1)

/* in-use bitmask values meaning "every buffer of this class is taken" */
#define SHARED_1K_BUF_FULL (0x003ff)
#define SHARED_2K_BUF_FULL (0x003ff)
#define SHARED_4K_BUF_FULL (0x0001f)
#define SHARED_8K_BUF_FULL (0x0001f)
#define SHARED_32K_BUF_FULL (0x00001)

#define SHARED_1K_BUF_AVAIL(X) \
	(NUMBER_1K_BUFS && !(((X) & SHARED_1K_BUF_FULL) == SHARED_1K_BUF_FULL))
#define SHARED_2K_BUF_AVAIL(X) \
	(NUMBER_2K_BUFS && !(((X) & SHARED_2K_BUF_FULL) == SHARED_2K_BUF_FULL))
#define SHARED_4K_BUF_AVAIL(X) \
	(NUMBER_4K_BUFS && !(((X) & SHARED_4K_BUF_FULL) == SHARED_4K_BUF_FULL))
#define SHARED_8K_BUF_AVAIL(X) \
	(NUMBER_8K_BUFS && !(((X) & SHARED_8K_BUF_FULL) == SHARED_8K_BUF_FULL))
#define SHARED_32K_BUF_AVAIL(X) \
	(NUMBER_32K_BUFS && !(((X) & SHARED_32K_BUF_FULL) == SHARED_32K_BUF_FULL))

/* indices into buf_inuse[] per size class */
#define B1K (0)
#define B2K (1)
#define B4K (2)
#define B8K (3)
#define B32K (4)

#define SHARED_BUF_SIZES (B32K+1)
#define MAX_MALLOC_BUFS (2000)
#define MAX_CACHE_SIZE (KILOBYTES(32))

struct shared_bufs {
	/* static buffer pools, one array per size class */
	char buf_1K[NUMBER_1K_BUFS][1024];
	char buf_2K[NUMBER_2K_BUFS][2048];
	char buf_4K[NUMBER_4K_BUFS][4096];
	char buf_8K[NUMBER_8K_BUFS][8192];
	char buf_32K[NUMBER_32K_BUFS][32768];
	/* cumulative allocation counters per class */
	long buf_1K_used;
	long buf_2K_used;
	long buf_4K_used;
	long buf_8K_used;
	long buf_32K_used;
	/* high-water marks of simultaneous use per class */
	long buf_1K_maxuse;
	long buf_2K_maxuse;
	long buf_4K_maxuse;
	long buf_8K_maxuse;
	long buf_32K_maxuse;
	/* overflow counts: requests that fell through to a larger class */
	long buf_1K_ovf;
	long buf_2K_ovf;
	long buf_4K_ovf;
	long buf_8K_ovf;
	long buf_32K_ovf;
	int buf_inuse[SHARED_BUF_SIZES];	/* in-use bitmasks */
	char *malloc_bp[MAX_MALLOC_BUFS];	/* fallback malloc'd buffers */
	long smallest;		/* smallest request seen */
	long largest;		/* largest request seen */
	long embedded;		/* currently-outstanding GETBUFs */
	long max_embedded;	/* high-water mark of the above */
	long mallocs;		/* fallback allocations made */
	long frees;		/* fallback allocations freed */
	double total;		/* sum of all request sizes */
	ulong reqs;		/* total number of requests */
} shared_bufs;

/* Reset all shared-buffer bookkeeping; called once at startup. */
void
buf_init(void)
{
	struct shared_bufs *bp;

	bp = &shared_bufs;
	BZERO(bp, sizeof(struct shared_bufs));

	bp->smallest = 0x7fffffff;
	bp->total = 0.0;
}

/*
 * Free up all buffers used by the last command.
 */
void
free_all_bufs(void)
{
	int i;
	struct shared_bufs *bp;

	bp = &shared_bufs;
	bp->embedded = 0;

	/* drop every static buffer by clearing the in-use bitmasks */
	for (i = 0; i < SHARED_BUF_SIZES; i++)
		bp->buf_inuse[i] = 0;

	/* release any fallback malloc'd buffers */
	for (i = 0; i < MAX_MALLOC_BUFS; i++) {
		if (bp->malloc_bp[i]) {
			free(bp->malloc_bp[i]);
			bp->malloc_bp[i] = NULL;
			bp->frees++;
		}
	}

	if (bp->mallocs != bp->frees)
		error(WARNING, "malloc/free mismatch (%ld/%ld)\n",
			bp->mallocs, bp->frees);
}

/*
 * Free a specific buffer that may have been returned by malloc().
 * If the address is one of the static buffers, look for it and
 * clear its inuse bit.
 */
void
freebuf(char *addr)
{
	int i;
	struct shared_bufs *bp;

	bp = &shared_bufs;
	bp->embedded--;

	if (CRASHDEBUG(8)) {
		INDENT(bp->embedded*2);
		fprintf(fp, "FREEBUF(%ld)\n", bp->embedded);
	}

	/* search each static pool for the address */
	for (i = 0; i < NUMBER_1K_BUFS; i++) {
		if (addr == (char *)&bp->buf_1K[i]) {
			bp->buf_inuse[B1K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_2K_BUFS; i++) {
		if (addr == (char *)&bp->buf_2K[i]) {
			bp->buf_inuse[B2K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_4K_BUFS; i++) {
		if (addr == (char *)&bp->buf_4K[i]) {
			bp->buf_inuse[B4K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_8K_BUFS; i++) {
		if (addr == (char *)&bp->buf_8K[i]) {
			bp->buf_inuse[B8K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_32K_BUFS; i++) {
		if (addr == (char *)&bp->buf_32K[i]) {
			bp->buf_inuse[B32K] &= ~(1 << i);
			return;
		}
	}

	/* not a static buffer: must be a tracked malloc'd one */
	for (i = 0; i < MAX_MALLOC_BUFS; i++) {
		if (bp->malloc_bp[i] == addr) {
			free(bp->malloc_bp[i]);
			bp->malloc_bp[i] = NULL;
			bp->frees++;
			return;
		}
	}

	error(FATAL,
	    "freeing an unknown buffer -- shared buffer inconsistency!\n");
}

/* DEBUG */
void
dump_embedded(char *s)
{
	struct shared_bufs *bp;
	char *p1;

	p1 = s ?
s : ""; bp = &shared_bufs; console("%s: embedded: %ld mallocs: %ld frees: %ld\n", p1, bp->embedded, bp->mallocs, bp->frees); } /* DEBUG */ long get_embedded(void) { struct shared_bufs *bp; bp = &shared_bufs; return(bp->embedded); } /* * "help -b" output */ void dump_shared_bufs(void) { int i; struct shared_bufs *bp; bp = &shared_bufs; fprintf(fp, " buf_1K_used: %ld\n", bp->buf_1K_used); fprintf(fp, " buf_2K_used: %ld\n", bp->buf_2K_used); fprintf(fp, " buf_4K_used: %ld\n", bp->buf_4K_used); fprintf(fp, " buf_8K_used: %ld\n", bp->buf_8K_used); fprintf(fp, " buf_32K_used: %ld\n", bp->buf_32K_used); fprintf(fp, " buf_1K_ovf: %ld\n", bp->buf_1K_ovf); fprintf(fp, " buf_2K_ovf: %ld\n", bp->buf_2K_ovf); fprintf(fp, " buf_4K_ovf: %ld\n", bp->buf_4K_ovf); fprintf(fp, " buf_8K_ovf: %ld\n", bp->buf_8K_ovf); fprintf(fp, " buf_32K_ovf: %ld\n", bp->buf_32K_ovf); fprintf(fp, " buf_1K_maxuse: %2ld of %d\n", bp->buf_1K_maxuse, NUMBER_1K_BUFS); fprintf(fp, " buf_2K_maxuse: %2ld of %d\n", bp->buf_2K_maxuse, NUMBER_2K_BUFS); fprintf(fp, " buf_4K_maxuse: %2ld of %d\n", bp->buf_4K_maxuse, NUMBER_4K_BUFS); fprintf(fp, " buf_8K_maxuse: %2ld of %d\n", bp->buf_8K_maxuse, NUMBER_8K_BUFS); fprintf(fp, "buf_32K_maxuse: %2ld of %d\n", bp->buf_32K_maxuse, NUMBER_32K_BUFS); fprintf(fp, " buf_inuse[%d]: ", SHARED_BUF_SIZES); for (i = 0; i < SHARED_BUF_SIZES; i++) fprintf(fp, "[%lx]", (ulong)bp->buf_inuse[i]); fprintf(fp, "\n"); for (i = 0; i < MAX_MALLOC_BUFS; i++) if (bp->malloc_bp[i]) fprintf(fp, " malloc_bp[%d]: %lx\n", i, (ulong)bp->malloc_bp[i]); if (bp->smallest == 0x7fffffff) fprintf(fp, " smallest: 0\n"); else fprintf(fp, " smallest: %ld\n", bp->smallest); fprintf(fp, " largest: %ld\n", bp->largest); fprintf(fp, " embedded: %ld\n", bp->embedded); fprintf(fp, " max_embedded: %ld\n", bp->max_embedded); fprintf(fp, " mallocs: %ld\n", bp->mallocs); fprintf(fp, " frees: %ld\n", bp->frees); fprintf(fp, " reqs/total: %ld/%.0f\n", bp->reqs, bp->total); fprintf(fp, " average size: %.0f\n", 
bp->total/bp->reqs); } /* * Try to get one of the static buffers first. If not available, fall * through and get it from malloc(), keeping trace of the returned address. */ #define SHARED_BUFSIZE(size) \ ((size <= 1024) ? 1024 >> 7 : \ ((size <= 2048) ? 2048 >> 7 : \ ((size <= 4096) ? 4096 >> 7 : \ ((size <= 8192) ? 8192 >> 7 : \ ((size <= 32768) ? 32768 >> 7 : -1))))) char * getbuf(long reqsize) { int i; int index; int bdx; int mask; struct shared_bufs *bp; char *bufp; if (!reqsize) { ulong retaddr = (ulong)__builtin_return_address(0); error(FATAL, "zero-size memory allocation! (called from %lx)\n", retaddr); } bp = &shared_bufs; index = SHARED_BUFSIZE(reqsize); if (CRASHDEBUG(7) && (reqsize > MAX_CACHE_SIZE)) error(NOTE, "GETBUF request > MAX_CACHE_SIZE: %ld\n", reqsize); if (CRASHDEBUG(8)) { INDENT(bp->embedded*2); fprintf(fp, "GETBUF(%ld -> %ld)\n", reqsize, bp->embedded); } bp->embedded++; if (bp->embedded > bp->max_embedded) bp->max_embedded = bp->embedded; if (reqsize < bp->smallest) bp->smallest = reqsize; if (reqsize > bp->largest) bp->largest = reqsize; bp->total += reqsize; bp->reqs++; switch (index) { case -1: break; case 8: if (SHARED_1K_BUF_AVAIL(bp->buf_inuse[B1K])) { mask = ~(bp->buf_inuse[B1K]); bdx = ffs(mask) - 1; bufp = bp->buf_1K[bdx]; bp->buf_1K_used++; bp->buf_inuse[B1K] |= (1 << bdx); bp->buf_1K_maxuse = MAX(bp->buf_1K_maxuse, count_bits_int(bp->buf_inuse[B1K])); BZERO(bufp, 1024); return(bufp); } bp->buf_1K_ovf++; /* FALLTHROUGH */ case 16: if (SHARED_2K_BUF_AVAIL(bp->buf_inuse[B2K])) { mask = ~(bp->buf_inuse[B2K]); bdx = ffs(mask) - 1; bufp = bp->buf_2K[bdx]; bp->buf_2K_used++; bp->buf_inuse[B2K] |= (1 << bdx); bp->buf_2K_maxuse = MAX(bp->buf_2K_maxuse, count_bits_int(bp->buf_inuse[B2K])); BZERO(bufp, 2048); return(bufp); } bp->buf_2K_ovf++; /* FALLTHROUGH */ case 32: if (SHARED_4K_BUF_AVAIL(bp->buf_inuse[B4K])) { mask = ~(bp->buf_inuse[B4K]); bdx = ffs(mask) - 1; bufp = bp->buf_4K[bdx]; bp->buf_4K_used++; bp->buf_inuse[B4K] |= (1 << bdx); 
bp->buf_4K_maxuse = MAX(bp->buf_4K_maxuse, count_bits_int(bp->buf_inuse[B4K])); BZERO(bufp, 4096); return(bufp); } bp->buf_4K_ovf++; /* FALLTHROUGH */ case 64: if (SHARED_8K_BUF_AVAIL(bp->buf_inuse[B8K])) { mask = ~(bp->buf_inuse[B8K]); bdx = ffs(mask) - 1; bufp = bp->buf_8K[bdx]; bp->buf_8K_used++; bp->buf_inuse[B8K] |= (1 << bdx); bp->buf_8K_maxuse = MAX(bp->buf_8K_maxuse, count_bits_int(bp->buf_inuse[B8K])); BZERO(bufp, 8192); return(bufp); } bp->buf_8K_ovf++; /* FALLTHROUGH */ case 256: if (SHARED_32K_BUF_AVAIL(bp->buf_inuse[B32K])) { mask = ~(bp->buf_inuse[B32K]); bdx = ffs(mask) - 1; bufp = bp->buf_32K[bdx]; bp->buf_32K_used++; bp->buf_inuse[B32K] |= (1 << bdx); bp->buf_32K_maxuse = MAX(bp->buf_32K_maxuse, count_bits_int(bp->buf_inuse[B32K])); BZERO(bufp, 32768); return(bufp); } bp->buf_32K_ovf++; break; } for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i]) continue; if ((bp->malloc_bp[i] = (char *)calloc(reqsize, 1))) { bp->mallocs++; return(bp->malloc_bp[i]); } break; } dump_shared_bufs(); return ((char *)(long) error(FATAL, "cannot allocate any more memory!\n")); } /* * Change the size of the previously-allocated memory block * pointed to by oldbuf to newsize bytes. Copy the minimum * of oldsize and newsize bytes from the oldbuf to the newbuf, * and return the address of the new buffer, which will have * a different address than oldbuf. */ char * resizebuf(char *oldbuf, long oldsize, long newsize) { char *newbuf; newbuf = GETBUF(newsize); BCOPY(oldbuf, newbuf, MIN(oldsize, newsize)); FREEBUF(oldbuf); return newbuf; } /* * Duplicate a string into a buffer allocated with GETBUF(). */ char * strdupbuf(char *oldstring) { char *newstring; newstring = GETBUF(strlen(oldstring)+1); strcpy(newstring, oldstring); return newstring; } /* * Return the number of bits set in an int or long. 
*/
int
count_bits_int(int val)
{
	int i, cnt;
	int total;

	cnt = sizeof(int) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			total++;
		val >>= 1;
	}

	return total;
}

int
count_bits_long(ulong val)
{
	int i, cnt;
	int total;

	cnt = sizeof(long) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			total++;
		val >>= 1;
	}

	return total;
}

/* Index of the highest set bit, or -1 if val is zero. */
int
highest_bit_long(ulong val)
{
	int i, cnt;
	int total;	/* NOTE(review): set but never read */
	int highest;

	highest = -1;
	cnt = sizeof(long) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			highest = i;
		val >>= 1;
	}

	return highest;
}

/* Index of the lowest set bit, or -1 if val is zero. */
int
lowest_bit_long(ulong val)
{
	int i, cnt;
	int lowest;

	lowest = -1;
	cnt = sizeof(long) * 8;

	for (i = 0; i < cnt; i++) {
		if (val & 1) {
			lowest = i;
			break;
		}
		val >>= 1;
	}

	return lowest;
}

/*
 * Debug routine to stop whatever's going on in its tracks:
 * send ourselves SIGQUIT, then force a segfault via a NULL deref.
 */
void
drop_core(char *s)
{
	volatile int *nullptr;
	int i ATTRIBUTE_UNUSED;

	if (s && ascii_string(s))
		fprintf(stderr, "%s", s);

	kill((pid_t)pc->program_pid, 3);

	nullptr = NULL;
	while (TRUE)
		i = *nullptr;
}

/*
 * For debug output to a device other than the current terminal.
 * pc->console must have been preset by:
 *
 *  1. by an .rc file setting: "set console /dev/whatever"
 *  2. by a runtime command: "set console /dev/whatever"
 *  3. during program invocation: "-c /dev/whatever"
 *
 * The first time it's called, the device will be opened.
 */
int
console(char *fmt, ...)
{
	char output[BUFSIZE*2];
	va_list ap;

	/* confd: -1 == unusable, -2 == not opened yet */
	if (!pc->console || !strlen(pc->console) ||
	    (pc->flags & NO_CONSOLE) || (pc->confd == -1))
		return 0;

	if (!fmt || !strlen(fmt))
		return 0;

	va_start(ap, fmt);
	(void)vsnprintf(output, BUFSIZE*2, fmt, ap);
	va_end(ap);

	if (pc->confd == -2) {
		/* lazily open the console device on first use */
		if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) {
			/* NOTE(review): trailing 0,0 args are unused by
			 * this format string */
			error(INFO, "console device %s: %s\n",
				pc->console, strerror(errno), 0, 0);
			return 0;
		}
	}

	return(write(pc->confd, output, strlen(output)));
}

/*
 * Allocate space to store the designated console device name.
 * If a console device pre-exists, free its name space and close the device.
 */
void
create_console_device(char *dev)
{
	if (pc->console) {
		if (pc->confd != -1)
			close(pc->confd);
		free(pc->console);
	}

	pc->confd = -2;	/* mark as "open on first write" */

	if ((pc->console = (char *)malloc(strlen(dev)+1)) == NULL)
		fprintf(stderr, "console name malloc: %s\n",
			strerror(errno));
	else {
		strcpy(pc->console, dev);
		/* probe the device with a first message */
		if (console("debug console [%ld]: %s\n",
		    pc->program_pid, (ulong)pc->console) < 0) {
			close(pc->confd);
			free(pc->console);
			pc->console = NULL;
			pc->confd = -1;
			if (!(pc->flags & RUNTIME))
				error(INFO, "cannot set console to %s\n",
					dev);
		}
	}
}

/*
 * Disable console output without closing the device.
 * Typically used with CONSOLE_OFF() macro.
 */
int
console_off(void)
{
	int orig_no_console;

	orig_no_console = pc->flags & NO_CONSOLE;
	pc->flags |= NO_CONSOLE;

	return orig_no_console;
}

/*
 * Re-enable console output.  Typically used with CONSOLE_ON() macro.
 */
int
console_on(int orig_no_console)
{
	if (!orig_no_console)
		pc->flags &= ~NO_CONSOLE;

	return(pc->flags & NO_CONSOLE);
}

/*
 * Print a string to the console device with no formatting, useful for
 * sending strings containing % signs.
 */
int
console_verbatim(char *s)
{
	char *p;
	int cnt;

	if (!pc->console || !strlen(pc->console) ||
	    (pc->flags & NO_CONSOLE) || (pc->confd == -1))
		return 0;

	if (!s || !strlen(s))
		return 0;

	if (pc->confd == -2) {
		if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) {
			fprintf(stderr, "%s: %s\n",
				pc->console, strerror(errno));
			return 0;
		}
	}

	/* write one byte at a time; stop on the first short write */
	for (cnt = 0, p = s; *p; p++) {
		if (write(pc->confd, p, 1) != 1)
			break;
		cnt++;
	}

	return cnt;
}

/*
 * Set up a signal handler.
 */
void
sigsetup(int sig, void *handler, struct sigaction *act, struct sigaction *oldact)
{
	BZERO(act, sizeof(struct sigaction));
	act->sa_handler = handler;
	act->sa_flags = SA_NOMASK;
	sigaction(sig, act, oldact);
}

/*
 * Convert a jiffies-based time value into a string showing the
 * number of days, hours:minutes:seconds.
*/ #define SEC_MINUTES (60) #define SEC_HOURS (60 * SEC_MINUTES) #define SEC_DAYS (24 * SEC_HOURS) char * convert_time(ulonglong count, char *buf) { ulonglong total, days, hours, minutes, seconds; if (CRASHDEBUG(2)) error(INFO, "convert_time: %lld (%llx)\n", count, count); if (!machdep->hz) { sprintf(buf, "(cannot calculate: unknown HZ value)"); return buf; } total = (count)/(ulonglong)machdep->hz; days = total / SEC_DAYS; total %= SEC_DAYS; hours = total / SEC_HOURS; total %= SEC_HOURS; minutes = total / SEC_MINUTES; seconds = total % SEC_MINUTES; buf[0] = NULLCHAR; if (days) sprintf(buf, "%llu days, ", days); sprintf(&buf[strlen(buf)], "%02llu:%02llu:%02llu", hours, minutes, seconds); return buf; } /* * Stall for a number of microseconds. */ void stall(ulong microseconds) { struct timeval delay; delay.tv_sec = 0; delay.tv_usec = (__time_t)microseconds; (void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &delay); } /* * Fill a buffer with a page count translated to a GB/MB/KB value. */ char * pages_to_size(ulong pages, char *buf) { double total; char *p1, *p2; if (pages == 0) { sprintf(buf, "0"); return buf; } total = (double)pages * (double)PAGESIZE(); if (total >= GIGABYTES(1)) sprintf(buf, "%.1f GB", total/(double)GIGABYTES(1)); else if (total >= MEGABYTES(1)) sprintf(buf, "%.1f MB", total/(double)MEGABYTES(1)); else sprintf(buf, "%ld KB", (ulong)(total/(double)KILOBYTES(1))); if ((p1 = strstr(buf, ".0 "))) { p2 = p1 + 3; *p1++ = ' '; strcpy(p1, p2); } return buf; } /* * If the list_head.next value points to itself, it's an emtpy list. 
*/ int empty_list(ulong list_head_addr) { ulong next; if (!readmem(list_head_addr, KVADDR, &next, sizeof(void *), "list_head next contents", RETURN_ON_ERROR)) return TRUE; return (next == list_head_addr); } int machine_type(char *type) { return STREQ(MACHINE_TYPE, type); } int machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query) { if (machine_type(e_machine) || machine_type(alt)) return FALSE; if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ return TRUE; error(WARNING, "machine type mismatch:\n"); fprintf(fp, " crash utility: %s\n", MACHINE_TYPE); fprintf(fp, " %s: %s%s%s\n\n", file, e_machine, alt ? " or " : "", alt ? alt : ""); return TRUE; } void command_not_supported() { error(FATAL, "command not supported or applicable on this architecture or kernel\n"); } void option_not_supported(int c) { error(FATAL, "-%c option not supported or applicable on this architecture or kernel\n", (char)c); } static int please_wait_len = 0; void please_wait(char *s) { int fd; char buf[BUFSIZE]; if ((pc->flags & SILENT) || !DUMPFILE() || (pc->flags & RUNTIME)) return; if (!(pc->flags & TTY) && KVMDUMP_DUMPFILE()) { if (!isatty(fileno(stdin)) || ((fd = open("/dev/tty", O_RDONLY)) < 0)) return; close(fd); } pc->flags |= PLEASE_WAIT; please_wait_len = sprintf(buf, "\rplease wait... (%s)", s); fprintf(fp, "%s", buf); fflush(fp); } void please_wait_done(void) { if (!(pc->flags & PLEASE_WAIT)) return; pc->flags &= ~PLEASE_WAIT; fprintf(fp, "\r"); pad_line(fp, please_wait_len, ' '); fprintf(fp, "\r"); fflush(fp); } /* * Compare two pathnames. */ int pathcmp(char *p1, char *p2) { char c1, c2; do { if ((c1 = *p1++) == '/') while (*p1 == '/') { p1++; } if ((c2 = *p2++) == '/') while (*p2 == '/') { p2++; } if (c1 == '\0') return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2; } while (c1 == c2); return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2; } #include /* * Check the byte-order of an ELF file vs. the host byte order. 
*/ int endian_mismatch(char *file, char dumpfile_endian, ulong query) { char *endian; switch (dumpfile_endian) { case ELFDATA2LSB: if (__BYTE_ORDER == __LITTLE_ENDIAN) return FALSE; endian = "little-endian"; break; case ELFDATA2MSB: if (__BYTE_ORDER == __BIG_ENDIAN) return FALSE; endian = "big-endian"; break; default: endian = "unknown"; break; } if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ return TRUE; error(WARNING, "endian mismatch:\n"); fprintf(fp, " crash utility: %s\n", (__BYTE_ORDER == __LITTLE_ENDIAN) ? "little-endian" : "big-endian"); fprintf(fp, " %s: %s\n\n", file, endian); return TRUE; } uint16_t swap16(uint16_t val, int swap) { if (swap) return (((val & 0x00ff) << 8) | ((val & 0xff00) >> 8)); else return val; } uint32_t swap32(uint32_t val, int swap) { if (swap) return (((val & 0x000000ffU) << 24) | ((val & 0x0000ff00U) << 8) | ((val & 0x00ff0000U) >> 8) | ((val & 0xff000000U) >> 24)); else return val; } /* * Get a sufficiently large buffer for cpumask. * You should call FREEBUF() on the result when you no longer need it. 
*/ ulong * get_cpumask_buf(void) { int cpulen; if ((cpulen = STRUCT_SIZE("cpumask_t")) < 0) cpulen = DIV_ROUND_UP(kt->cpus, BITS_PER_LONG) * sizeof(ulong); return (ulong *)GETBUF(cpulen); } int make_cpumask(char *s, ulong *mask, int flags, int *errptr) { char *p, *q, *orig; int start, end; int i; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "make_cpumask: received NULL string\n"); orig = NULL; goto make_cpumask_error; } orig = strdup(s); p = strtok(s, ","); while (p) { s = strtok(NULL, ""); if (STREQ(p, "a") || STREQ(p, "all")) { start = 0; end = kt->cpus - 1; } else { start = end = -1; q = strtok(p, "-"); start = dtoi(q, flags, errptr); if ((q = strtok(NULL, "-"))) end = dtoi(q, flags, errptr); if (end == -1) end = start; } if ((start < 0) || (start >= kt->cpus) || (end < 0) || (end >= kt->cpus)) { error(INFO, "invalid cpu specification: %s\n", orig); goto make_cpumask_error; } for (i = start; i <= end; i++) SET_BIT(mask, i); p = strtok(s, ","); } free(orig); return TRUE; make_cpumask_error: free(orig); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Copy a string into a sized buffer. If necessary, truncate * the resultant string in the sized buffer so that it will * always be NULL-terminated. */ size_t strlcpy(char *dest, char *src, size_t size) { size_t ret = strlen(src); if (size) { size_t len = (ret >= size) ? 
size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } return ret; } struct rb_node * rb_first(struct rb_root *root) { struct rb_root rloc; struct rb_node *n; struct rb_node nloc; readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), "rb_root", FAULT_ON_ERROR); n = rloc.rb_node; if (!n) return NULL; while (rb_left(n, &nloc)) n = nloc.rb_left; return n; } struct rb_node * rb_parent(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return (struct rb_node *)(nloc->rb_parent_color & ~3); } struct rb_node * rb_right(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_right; } struct rb_node * rb_left(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_left; } struct rb_node * rb_next(struct rb_node *node) { struct rb_node nloc; struct rb_node *parent; /* node is destroyed */ if (!accessible((ulong)node)) return NULL; parent = rb_parent(node, &nloc); if (parent == node) return NULL; if (nloc.rb_right) { /* rb_right is destroyed */ if (!accessible((ulong)nloc.rb_right)) return NULL; node = nloc.rb_right; while (rb_left(node, &nloc)) { /* rb_left is destroyed */ if (!accessible((ulong)nloc.rb_left)) return NULL; node = nloc.rb_left; } return node; } while ((parent = rb_parent(node, &nloc))) { /* parent is destroyed */ if (!accessible((ulong)parent)) return NULL; if (node != rb_right(parent, &nloc)) break; node = parent; } return parent; } struct rb_node * rb_last(struct rb_root *root) { struct rb_node *node; struct rb_node nloc; /* meet destroyed data */ if (!accessible((ulong)(root + OFFSET(rb_root_rb_node)))) return NULL; readmem((ulong)(root + OFFSET(rb_root_rb_node)), KVADDR, &node, sizeof(node), "rb_root node", FAULT_ON_ERROR); while (1) { if (!node) break; /* meet destroyed data */ if 
(!accessible((ulong)node)) return NULL; readmem((ulong)node, KVADDR, &nloc, sizeof(struct rb_node), "rb_node last", FAULT_ON_ERROR); /* meet the last one */ if (!nloc.rb_right) break; /* meet destroyed data */ if (!!accessible((ulong)nloc.rb_right)) break; node = nloc.rb_right; } return node; } crash-7.1.4/lkcd_v2_v3.c0000775000000000000000000003637312634305150013441 0ustar rootroot/* lkcd_v2_v3.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define LKCD_COMMON #include "defs.h" #define CONFIG_VMDUMP #include "lkcd_vmdump_v2_v3.h" static dump_header_t dump_header_v2_v3 = { 0 }; static dump_page_t dump_page = { 0 }; static dump_header_asm_t dump_header_asm = { 0 }; static void mclx_cache_page_headers_v3(void); /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. 
*/
int
lkcd_dump_init_v2_v3(FILE *fp, int fd)
{
	int i;
	int eof;
	uint32_t pgcnt;
	dump_header_t *dh;
	dump_header_asm_t *dha;
	dump_page_t *dp;

	lkcd->fd = fd;
	lkcd->fp = fp;

	lseek(lkcd->fd, 0, SEEK_SET);

	dh = &dump_header_v2_v3;
	dha = &dump_header_asm;
	dp = &dump_page;

	/* read the generic header, skipping the MCLX page-header cache
	 * when present, then the architecture-specific header */
	if (read(lkcd->fd, dh, sizeof(dump_header_t)) !=
	    sizeof(dump_header_t))
		return FALSE;

	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		lseek(lkcd->fd, MCLX_V1_PAGE_HEADER_CACHE, SEEK_CUR);

	if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) !=
	    sizeof(dump_header_asm_t))
		return FALSE;

	lkcd->dump_page = dp;
	lkcd->dump_header = dh;
	lkcd->dump_header_asm = dha;

	if (lkcd->debug)
		dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY);

	/*
	 * Allocate and clear the benchmark offsets, one per megabyte.
	 */
	lkcd->page_size = dh->dh_page_size;
	lkcd->page_shift = ffs(lkcd->page_size) - 1;
	lkcd->bits = sizeof(long) * 8;
	lkcd->benchmark_pages =
		(dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1;
	lkcd->total_pages = dh->dh_num_pages;

	lkcd->zone_shift = ffs(ZONE_SIZE) - 1;
	lkcd->zone_mask = ~(ZONE_SIZE - 1);
	lkcd->num_zones = 0;
	lkcd->max_zones = 0;
	lkcd->zoned_offsets = 0;

	/* version-specific page accessors and compression scheme */
	lkcd->get_dp_flags = get_dp_flags_v2_v3;
	lkcd->get_dp_address = get_dp_address_v2_v3;
	lkcd->get_dp_size = get_dp_size_v2_v3;
	lkcd->compression = LKCD_DUMP_COMPRESS_RLE;
	lkcd->page_header_size = sizeof(dump_page_t);

	lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET);

	/* sanity-walk the page headers (all of them only with debug >= 2) */
	for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) {

		switch (lkcd_load_dump_page_header(dp, pgcnt))
		{
		case LKCD_DUMPFILE_OK:
		case LKCD_DUMPFILE_END:
			break;

		case LKCD_DUMPFILE_EOF:
			lkcd_print("reached EOF\n");
			eof = TRUE;
			continue;
		}

		if (dp->dp_flags &
		    ~(DUMP_COMPRESSED|DUMP_RAW|DUMP_END|LKCD_DUMP_MCLX_V0)) {
			lkcd_print("unknown page flag in dump: %lx\n",
				dp->dp_flags);
		}

		if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1))
			lkcd->flags |= LKCD_MCLX;

		if (dp->dp_size > 4096) {
			lkcd_print("dp_size > 4096: %d\n", dp->dp_size);
			dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY);
		}

		if (dp->dp_flags & DUMP_END) {
			lkcd_print("found DUMP_END\n");
			break;
		}

		lseek(lkcd->fd, dp->dp_size, SEEK_CUR);

		if (!LKCD_DEBUG(2))
			break;
	}

	/*
	 * Allocate space for LKCD_CACHED_PAGES data pages plus one to
	 * contain a copy of the compressed data of the current page.
	 */
	if ((lkcd->page_cache_buf = (char *)malloc
	    (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL)
		return FALSE;

	/*
	 * Clear the page data areas.
	 */
	lkcd_free_memory();
	for (i = 0; i < LKCD_CACHED_PAGES; i++) {
		lkcd->page_cache_hdr[i].pg_bufptr =
			&lkcd->page_cache_buf[i * dh->dh_page_size];
	}

	if ((lkcd->compressed_page =
	    (char *)malloc(dh->dh_page_size)) == NULL)
		return FALSE;

	if ((lkcd->page_hash = (struct page_hash_entry *)calloc
	    (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL)
		return FALSE;

	/* trust the header's page count unless the walk proved otherwise */
	lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ?
		pgcnt : dh->dh_num_pages;
	lkcd->panic_task = (ulong)dh->dh_current_task;
	lkcd->panic_string = (char *)&dh->dh_panic_string[0];

	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		mclx_cache_page_headers_v3();

	if (!fp)
		lkcd->flags |= LKCD_REMOTE;
	lkcd->flags |= LKCD_VALID;

	return TRUE;
}

/*
 * Return the current page's dp_size.
 */
uint32_t
get_dp_size_v2_v3(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_size);
}

/*
 * Return the current page's dp_flags.
 */
uint32_t
get_dp_flags_v2_v3(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_flags);
}

/*
 * Return the current page's dp_address.
 */
uint64_t
get_dp_address_v2_v3(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_address);
}

/* Debug display of one dump_page_t header, prefixed by string s. */
void
dump_dump_page_v2_v3(char *s, void *dpp)
{
	dump_page_t *dp;
	uint32_t flags;
	int others;

	console(s);

	dp = (dump_page_t *)dpp;
	others = 0;

	console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ",
		dp->dp_address);
	console("dp_size: %ld ", dp->dp_size);
	console("dp_flags: %lx (", flags = dp->dp_flags);

	/* NOTE(review): extra others++ arg is unused by this format */
	if (flags & DUMP_COMPRESSED)
		console("DUMP_COMPRESSED", others++);
	if (flags & DUMP_RAW)
		console("%sDUMP_RAW", others++ ?
"|" : ""); if (flags & DUMP_END) console("%sDUMP_END", others++ ? "|" : ""); if (flags & LKCD_DUMP_MCLX_V0) console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); console(")\n"); } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment_v2_v3(ulong arg) { int others; dump_header_t *dh; dump_header_asm_t *dha; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dha = (dump_header_asm_t *)lkcd->dump_header_asm; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); others = 0; lkcd_print(" dh_version: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version); switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) { case LKCD_DUMP_V1: lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : ""); break; case LKCD_DUMP_V2: lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : ""); break; case LKCD_DUMP_V3: lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : ""); break; } if (dh->dh_version & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); if (dh->dh_version & LKCD_DUMP_MCLX_V1) lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_header_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level); others = 0; if (dh->dh_dump_level & DUMP_HEADER) lkcd_print("%sDUMP_HEADER", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_KERN) lkcd_print("%sDUMP_KERN", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_USED) lkcd_print("%sDUMP_USED", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_ALL) lkcd_print("%sDUMP_ALL", others++ ? 
"|" : ""); lkcd_print(")\n"); lkcd_print(" dh_page_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: "); lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end); lkcd_print(" dh_num_pages: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && dh->dh_panic_string && strstr(dh->dh_panic_string, "\n") ? "" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print(" dh_utsname:\n"); lkcd_print(" sysname: %s\n", dh->dh_utsname.sysname); lkcd_print(" nodename: %s\n", dh->dh_utsname.nodename); lkcd_print(" release: %s\n", dh->dh_utsname.release); lkcd_print(" version: %s\n", dh->dh_utsname.version); lkcd_print(" machine: %s\n", dh->dh_utsname.machine); lkcd_print(" domainname: %s\n", dh->dh_utsname.domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print("dha_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dha->dha_magic_number); if (dha->dha_magic_number == DUMP_ASM_MAGIC_NUMBER) lkcd_print("(DUMP_ASM_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); lkcd_print(" dha_version: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dha->dha_version); lkcd_print(" dha_header_size: "); lkcd_print(BITS32() ? 
"%ld\n" : "%d\n", dha->dha_header_size); #ifdef X86 lkcd_print(" dha_esp: %lx\n", dha->dha_esp); lkcd_print(" dha_eip: %lx\n", dha->dha_eip); #endif #if defined PPC || ALPHA || IA64 /* TBD */ #endif lkcd_print(" dha_regs:\n"); #ifdef PPC lkcd_print(" (PowerPC register display TBD)\n"); #endif #ifdef IA64 lkcd_print(" (IA64 register display TBD)\n"); #endif #ifdef X86 lkcd_print(" ebx: %lx\n", dha->dha_regs.ebx); lkcd_print(" ecx: %lx\n", dha->dha_regs.ecx); lkcd_print(" edx: %lx\n", dha->dha_regs.edx); lkcd_print(" esi: %lx\n", dha->dha_regs.esi); lkcd_print(" edi: %lx\n", dha->dha_regs.edi); lkcd_print(" eax: %lx\n", dha->dha_regs.eax); lkcd_print(" xds: %x\n", dha->dha_regs.xds); lkcd_print(" xes: %x\n", dha->dha_regs.xes); lkcd_print(" orig_eax: %lx\n", dha->dha_regs.orig_eax); lkcd_print(" eip: %lx\n", dha->dha_regs.eip); lkcd_print(" xcs: %x\n", dha->dha_regs.xcs); lkcd_print(" eflags: %lx\n", dha->dha_regs.eflags); lkcd_print(" esp: %lx\n", dha->dha_regs.esp); lkcd_print(" xss: %x\n", dha->dha_regs.xss); #endif #ifdef ALPHA lkcd_print(" r0: %lx\n", dha->dha_regs.r0); lkcd_print(" r1: %lx\n", dha->dha_regs.r1); lkcd_print(" r2: %lx\n", dha->dha_regs.r2); lkcd_print(" r3: %lx\n", dha->dha_regs.r3); lkcd_print(" r4: %lx\n", dha->dha_regs.r4); lkcd_print(" r5: %lx\n", dha->dha_regs.r5); lkcd_print(" r6: %lx\n", dha->dha_regs.r6); lkcd_print(" r7: %lx\n", dha->dha_regs.r7); lkcd_print(" r8: %lx\n", dha->dha_regs.r8); lkcd_print(" r19: %lx\n", dha->dha_regs.r19); lkcd_print(" r20: %lx\n", dha->dha_regs.r20); lkcd_print(" r21: %lx\n", dha->dha_regs.r21); lkcd_print(" r22: %lx\n", dha->dha_regs.r22); lkcd_print(" r23: %lx\n", dha->dha_regs.r23); lkcd_print(" r24: %lx\n", dha->dha_regs.r24); lkcd_print(" r25: %lx\n", dha->dha_regs.r25); lkcd_print(" r26: %lx\n", dha->dha_regs.r26); lkcd_print(" r27: %lx\n", dha->dha_regs.r27); lkcd_print(" r28: %lx\n", dha->dha_regs.r28); lkcd_print(" hae: %lx\n", dha->dha_regs.hae); lkcd_print(" trap_a0: %lx\n", 
dha->dha_regs.trap_a0); lkcd_print(" trap_a1: %lx\n", dha->dha_regs.trap_a1); lkcd_print(" trap_a2: %lx\n", dha->dha_regs.trap_a2); lkcd_print(" ps: %lx\n", dha->dha_regs.ps); lkcd_print(" pc: %lx\n", dha->dha_regs.pc); lkcd_print(" gp: %lx\n", dha->dha_regs.gp); lkcd_print(" r16: %lx\n", dha->dha_regs.r16); lkcd_print(" r17: %lx\n", dha->dha_regs.r17); lkcd_print(" r18: %lx\n", dha->dha_regs.r18); #endif if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address); lkcd_print(" dp_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size); lkcd_print(" dp_flags: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_COMPRESSED) lkcd_print("DUMP_COMPRESSED", others++); if (dp->dp_flags & DUMP_RAW) lkcd_print("%sDUMP_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_END) lkcd_print("%sDUMP_END", others++ ? "|" : ""); if (dp->dp_flags & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); lkcd_print(")\n"); } /* * Read the MCLX-enhanced page header cache. Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. */ static void mclx_cache_page_headers_v3(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. 
*/ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-7.1.4/net.c0000775000000000000000000013233712634305150012270 0ustar rootroot/* net.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include #include /* * Cache values we need that can change based on OS version, or any other * variables static to this file. These are setup in net_init(). Dump * the table during runtime via "help -n". 
*/ struct net_table { ulong flags; char *netdevice; /* name of net device */ char *dev_name_t; /* readmem ID's */ char *dev_type_t; char *dev_addr_t; long dev_name; long dev_next; long dev_type; long dev_addr_len; long dev_ip_ptr; long in_device_ifa_list; long in_ifaddr_ifa_next; long in_ifaddr_ifa_address; int net_device_name_index; } net_table = { 0 }; struct net_table *net = &net_table; #define NETDEV_INIT (0x1) #define STRUCT_DEVICE (0x2) #define STRUCT_NET_DEVICE (0x4) #define SOCK_V1 (0x8) #define SOCK_V2 (0x10) #define NO_INET_SOCK (0x20) #define DEV_NAME_MAX 100 struct devinfo { char dev_name[DEV_NAME_MAX]; unsigned char dev_addr_len; short dev_type; }; #define BYTES_IP_ADDR 15 /* bytes to print IP addr (xxx.xxx.xxx.xxx) */ #define BYTES_PORT_NUM 5 /* bytes to print port number */ /* bytes needed for : notation */ #define BYTES_IP_TUPLE (BYTES_IP_ADDR + BYTES_PORT_NUM + 1) static void show_net_devices(ulong); static void show_net_devices_v2(ulong); static void show_net_devices_v3(ulong); static void print_neighbour_q(ulong, int); static void get_netdev_info(ulong, struct devinfo *); static void get_device_name(ulong, char *); static void get_device_address(ulong, char *); static void get_sock_info(ulong, char *); static void dump_arp(void); static void arp_state_to_flags(unsigned char); static void dump_ether_hw(unsigned char *, int); static void dump_sockets(ulong, struct reference *); static int sym_socket_dump(ulong, int, int, ulong, struct reference *); static void dump_hw_addr(unsigned char *, int); static char *dump_in6_addr_port(uint16_t *, uint16_t, char *, int *); #define MK_TYPE_T(f,s,m) \ do { \ (f) = malloc(strlen(s) + strlen(m) + 2); \ if ((f) == NULL) { \ error(WARNING, "malloc fail for type %s.%s", (s), (m)); \ } else { \ sprintf((f), "%s %s", (s), (m)); \ } \ } while(0) void net_init(void) { /* * Note the order of the following checks. 
The device struct was * renamed to net_device in 2.3, but there may be another struct * called 'device' so we check for the new one first. */ STRUCT_SIZE_INIT(net_device, "net_device"); if (VALID_STRUCT(net_device)) { net->netdevice = "net_device"; net->dev_next = MEMBER_OFFSET_INIT(net_device_next, "net_device", "next"); net->dev_name = MEMBER_OFFSET_INIT(net_device_name, "net_device", "name"); net->dev_type = MEMBER_OFFSET_INIT(net_device_type, "net_device", "type"); net->dev_addr_len = MEMBER_OFFSET_INIT(net_device_addr_len, "net_device", "addr_len"); net->dev_ip_ptr = MEMBER_OFFSET_INIT(net_device_ip_ptr, "net_device", "ip_ptr"); MEMBER_OFFSET_INIT(net_device_dev_list, "net_device", "dev_list"); MEMBER_OFFSET_INIT(net_dev_base_head, "net", "dev_base_head"); ARRAY_LENGTH_INIT(net->net_device_name_index, net_device_name, "net_device.name", NULL, sizeof(char)); net->flags |= (NETDEV_INIT|STRUCT_NET_DEVICE); } else { STRUCT_SIZE_INIT(device, "device"); if (VALID_STRUCT(device)) { net->netdevice = "device"; net->dev_next = MEMBER_OFFSET_INIT(device_next, "device", "next"); net->dev_name = MEMBER_OFFSET_INIT(device_name, "device", "name"); net->dev_type = MEMBER_OFFSET_INIT(device_type, "device", "type"); net->dev_ip_ptr = MEMBER_OFFSET_INIT(device_ip_ptr, "device", "ip_ptr"); net->dev_addr_len = MEMBER_OFFSET_INIT(device_addr_len, "device", "addr_len"); net->flags |= (NETDEV_INIT|STRUCT_DEVICE); } else error(WARNING, "net_init: unknown device type for net device"); } if (VALID_MEMBER(task_struct_nsproxy)) MEMBER_OFFSET_INIT(nsproxy_net_ns, "nsproxy", "net_ns"); if (net->flags & NETDEV_INIT) { MK_TYPE_T(net->dev_name_t, net->netdevice, "name"); MK_TYPE_T(net->dev_type_t, net->netdevice, "type"); MK_TYPE_T(net->dev_addr_t, net->netdevice, "addr_len"); MEMBER_OFFSET_INIT(socket_sk, "socket", "sk"); MEMBER_OFFSET_INIT(neighbour_next, "neighbour", "next"); MEMBER_OFFSET_INIT(neighbour_primary_key, "neighbour", "primary_key"); MEMBER_OFFSET_INIT(neighbour_ha, "neighbour", 
"ha"); MEMBER_OFFSET_INIT(neighbour_dev, "neighbour", "dev"); MEMBER_OFFSET_INIT(neighbour_nud_state, "neighbour", "nud_state"); MEMBER_OFFSET_INIT(neigh_table_nht_ptr, "neigh_table", "nht"); if (VALID_MEMBER(neigh_table_nht_ptr)) { MEMBER_OFFSET_INIT(neigh_table_hash_mask, "neigh_hash_table", "hash_mask"); MEMBER_OFFSET_INIT(neigh_table_hash_shift, "neigh_hash_table", "hash_shift"); MEMBER_OFFSET_INIT(neigh_table_hash_buckets, "neigh_hash_table", "hash_buckets"); } else { MEMBER_OFFSET_INIT(neigh_table_hash_buckets, "neigh_table", "hash_buckets"); MEMBER_OFFSET_INIT(neigh_table_hash_mask, "neigh_table", "hash_mask"); } MEMBER_OFFSET_INIT(neigh_table_key_len, "neigh_table", "key_len"); MEMBER_OFFSET_INIT(in_device_ifa_list, "in_device", "ifa_list"); MEMBER_OFFSET_INIT(in_ifaddr_ifa_next, "in_ifaddr", "ifa_next"); MEMBER_OFFSET_INIT(in_ifaddr_ifa_address, "in_ifaddr", "ifa_address"); STRUCT_SIZE_INIT(sock, "sock"); MEMBER_OFFSET_INIT(sock_family, "sock", "family"); if (VALID_MEMBER(sock_family)) { MEMBER_OFFSET_INIT(sock_daddr, "sock", "daddr"); MEMBER_OFFSET_INIT(sock_rcv_saddr, "sock", "rcv_saddr"); MEMBER_OFFSET_INIT(sock_dport, "sock", "dport"); MEMBER_OFFSET_INIT(sock_sport, "sock", "sport"); MEMBER_OFFSET_INIT(sock_num, "sock", "num"); MEMBER_OFFSET_INIT(sock_type, "sock", "type"); net->flags |= SOCK_V1; } else { /* * struct sock { * struct sock_common __sk_common; * #define sk_family __sk_common.skc_family * ... 
*/ MEMBER_OFFSET_INIT(sock_common_skc_family, "sock_common", "skc_family"); MEMBER_OFFSET_INIT(sock_sk_type, "sock", "sk_type"); /* * struct inet_sock { * struct sock sk; * struct ipv6_pinfo *pinet6; * struct inet_opt inet; * }; */ STRUCT_SIZE_INIT(inet_sock, "inet_sock"); STRUCT_SIZE_INIT(socket, "socket"); if (STRUCT_EXISTS("inet_opt")) { MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", "rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); } else { /* inet_opt moved to inet_sock */ ASSIGN_OFFSET(inet_sock_inet) = 0; if (MEMBER_EXISTS("inet_sock", "daddr")) { MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "num"); } else if (MEMBER_EXISTS("inet_sock", "inet_daddr")) { MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "inet_daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "inet_rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "inet_dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "inet_sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "inet_num"); } else if ((MEMBER_OFFSET("inet_sock", "sk") == 0) && (MEMBER_OFFSET("sock", "__sk_common") == 0)) { MEMBER_OFFSET_INIT(inet_opt_daddr, "sock_common", "skc_daddr"); if (INVALID_MEMBER(inet_opt_daddr)) ANON_MEMBER_OFFSET_INIT(inet_opt_daddr, "sock_common", "skc_daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "sock_common", "skc_rcv_saddr"); if (INVALID_MEMBER(inet_opt_rcv_saddr)) ANON_MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "sock_common", "skc_rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "inet_dport"); 
if (INVALID_MEMBER(inet_opt_dport)) ANON_MEMBER_OFFSET_INIT(inet_opt_dport, "sock_common", "skc_dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "inet_sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "inet_num"); if (INVALID_MEMBER(inet_opt_num)) ANON_MEMBER_OFFSET_INIT(inet_opt_num, "sock_common", "skc_num"); } } if (VALID_STRUCT(inet_sock) && INVALID_MEMBER(inet_sock_inet)) { /* * gdb can't seem to figure out the inet_sock * in later 2.6 kernels, returning this: * * struct inet_sock { * * } * * It does know the struct size, so kludge it * to subtract the size of the inet_opt struct * from the size of the containing inet_sock. */ net->flags |= NO_INET_SOCK; ASSIGN_OFFSET(inet_sock_inet) = SIZE(inet_sock) - STRUCT_SIZE("inet_opt"); } /* * If necessary, set inet_sock size and inet_sock_inet offset, * accounting for the configuration-dependent, intervening, * struct ipv6_pinfo pointer located in between the sock and * inet_opt members of the inet_sock. */ if (!VALID_STRUCT(inet_sock)) { if (symbol_exists("tcpv6_protocol") && symbol_exists("udpv6_protocol")) { ASSIGN_SIZE(inet_sock) = SIZE(sock) + sizeof(void *) + STRUCT_SIZE("inet_opt"); ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock) + sizeof(void *); } else { ASSIGN_SIZE(inet_sock) = SIZE(sock) + STRUCT_SIZE("inet_opt"); ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock); } } MEMBER_OFFSET_INIT(ipv6_pinfo_rcv_saddr, "ipv6_pinfo", "rcv_saddr"); MEMBER_OFFSET_INIT(ipv6_pinfo_daddr, "ipv6_pinfo", "daddr"); STRUCT_SIZE_INIT(in6_addr, "in6_addr"); MEMBER_OFFSET_INIT(socket_alloc_vfs_inode, "socket_alloc", "vfs_inode"); net->flags |= SOCK_V2; } } } /* * The net command... 
*/ #define NETOPTS "N:asSR:xdn" #define s_FLAG FOREACH_s_FLAG #define S_FLAG FOREACH_S_FLAG #define x_FLAG FOREACH_x_FLAG #define d_FLAG FOREACH_d_FLAG #define NET_REF_FOUND (0x1) #define NET_REF_HEXNUM (0x2) #define NET_REF_DECNUM (0x4) #define NET_TASK_HEADER_PRINTED (0x8) #define NET_SOCK_HEADER_PRINTED (0x10) #define NET_REF_FOUND_ITEM (0x20) #define NET_REFERENCE_CHECK(X) (X) #define NET_REFERENCE_FOUND(X) ((X) && ((X)->cmdflags & NET_REF_FOUND)) void cmd_net(void) { int c; ulong sflag, nflag, aflag; ulong value; ulong task; struct task_context *tc = NULL; struct in_addr in_addr; struct reference reference, *ref; if (!(net->flags & NETDEV_INIT)) error(FATAL, "net subsystem not initialized!"); ref = NULL; sflag = nflag = aflag = 0; task = pid_to_task(0); while ((c = getopt(argcnt, args, NETOPTS)) != EOF) { switch (c) { case 'R': if (ref) error(INFO, "only one -R option allowed\n"); else { ref = &reference; BZERO(ref, sizeof(struct reference)); ref->str = optarg; } break; case 'a': dump_arp(); aflag++; break; case 'N': value = stol(optarg, FAULT_ON_ERROR, NULL); in_addr.s_addr = (in_addr_t)value; fprintf(fp, "%s\n", inet_ntoa(in_addr)); return; case 's': if (sflag & S_FLAG) error(INFO, "only one -s or -S option allowed\n"); else sflag |= s_FLAG; break; case 'S': if (sflag & s_FLAG) error(INFO, "only one -s or -S option allowed\n"); else sflag |= S_FLAG; break; case 'x': if (sflag & d_FLAG) error(FATAL, "-d and -x are mutually exclusive\n"); sflag |= x_FLAG; break; case 'd': if (sflag & x_FLAG) error(FATAL, "-d and -x are mutually exclusive\n"); sflag |= d_FLAG; break; case 'n': nflag = 1; task = CURRENT_TASK(); if (args[optind]) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: case STR_TASK: task = tc->task; } } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (sflag & (s_FLAG|S_FLAG)) dump_sockets(sflag, ref); else { if ((argcnt == 1) || nflag) show_net_devices(task); else if (!aflag) 
cmd_usage(pc->curcmd, SYNOPSIS); } } /* * Just display the address and name of each net device. */ static void show_net_devices(ulong task) { ulong next; long flen; char buf[BUFSIZE]; if (symbol_exists("dev_base_head")) { show_net_devices_v2(task); return; } else if (symbol_exists("init_net")) { show_net_devices_v3(task); return; } if (!symbol_exists("dev_base")) error(FATAL, "dev_base, dev_base_head or init_net do not exist!\n"); get_symbol_data("dev_base", sizeof(void *), &next); if (!net->netdevice || !next) return; flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); do { fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(next))); get_device_name(next, buf); fprintf(fp, "%-6s ", buf); get_device_address(next, buf); fprintf(fp, "%s\n", buf); readmem(next+net->dev_next, KVADDR, &next, sizeof(void *), "(net_)device.next", FAULT_ON_ERROR); } while (next); } static void show_net_devices_v2(ulong task) { struct list_data list_data, *ld; char *net_device_buf; char buf[BUFSIZE]; int ndevcnt, i; long flen; if (!net->netdevice) /* initialized in net_init() */ return; flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); net_device_buf = GETBUF(SIZE(net_device)); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; get_symbol_data("dev_base_head", sizeof(void *), &ld->start); ld->end = symbol_value("dev_base_head"); ld->list_head_offset = OFFSET(net_device_dev_list); ndevcnt = do_list(ld); for (i = 0; i < ndevcnt; ++i) { readmem(ld->list_ptr[i], KVADDR, net_device_buf, SIZE(net_device), "net_device buffer", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(ld->list_ptr[i]))); get_device_name(ld->list_ptr[i], buf); fprintf(fp, "%-6s ", buf); get_device_address(ld->list_ptr[i], buf); 
fprintf(fp, "%s\n", buf); } FREEBUF(ld->list_ptr); FREEBUF(net_device_buf); } static void show_net_devices_v3(ulong task) { ulong nsproxy_p, net_ns_p; struct list_data list_data, *ld; char *net_device_buf; char buf[BUFSIZE]; int ndevcnt, i; long flen; if (!net->netdevice) /* initialized in net_init() */ return; flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); net_device_buf = GETBUF(SIZE(net_device)); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; if (VALID_MEMBER(nsproxy_net_ns)) { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_net_ns), KVADDR, &net_ns_p, sizeof(ulong), "nsproxy.net_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine net_namespace location!\n"); } else net_ns_p = symbol_value("init_net"); ld->start = ld->end = net_ns_p + OFFSET(net_dev_base_head); ld->list_head_offset = OFFSET(net_device_dev_list); ndevcnt = do_list(ld); /* * Skip the first entry (init_net). */ for (i = 1; i < ndevcnt; ++i) { readmem(ld->list_ptr[i], KVADDR, net_device_buf, SIZE(net_device), "net_device buffer", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(ld->list_ptr[i]))); get_device_name(ld->list_ptr[i], buf); fprintf(fp, "%-6s ", buf); get_device_address(ld->list_ptr[i], buf); fprintf(fp, "%s\n", buf); } FREEBUF(ld->list_ptr); FREEBUF(net_device_buf); } /* * Perform the actual work of dumping the ARP table... 
 */

#define ARP_HEADING \
"NEIGHBOUR IP ADDRESS HW TYPE HW ADDRESS DEVICE STATE"

/*
 * Dump the kernel's ARP table (arp_tbl) by walking every hash bucket and
 * printing each neighbour chain via print_neighbour_q().  Handles the
 * three historical layouts of the bucket array: a fixed array embedded in
 * neigh_table, a pointer + hash_mask (2.6.9+), and the separate
 * neigh_hash_table pointed to by neigh_table.nht (~2.6.37+).
 */
static void
dump_arp(void)
{
	ulong arp_tbl;          /* address of arp_tbl */
	ulong *hash_buckets;
	ulong hash;
	long hash_bytes;
	int nhash_buckets = 0;
	int key_len;
	int i;
	int header_printed = 0;
	int hash_mask = 0;
	ulong nht;

	if (!symbol_exists("arp_tbl"))
		error(FATAL, "arp_tbl does not exist in this kernel\n");

	arp_tbl = symbol_value("arp_tbl");

	/*
	 * NOTE: 2.6.8 -> 2.6.9 neigh_table struct changed from:
	 *
	 *   struct neighbour *hash_buckets[32];
	 * to
	 *   struct neighbour **hash_buckets;
	 *
	 * Use 'hash_mask' as indicator to decide if we're dealing
	 * with an array or a pointer.
	 *
	 * Around 2.6.37 neigh_hash_table struct has been introduced
	 * and pointer to it has been added to neigh_table.
	 */
	if (VALID_MEMBER(neigh_table_nht_ptr)) {
		readmem(arp_tbl + OFFSET(neigh_table_nht_ptr),
			KVADDR, &nht, sizeof(nht),
			"neigh_table nht", FAULT_ON_ERROR);
		/* NB! Re-use of offsets like neigh_table_hash_mask
		 * with neigh_hash_table structure */
		if (VALID_MEMBER(neigh_table_hash_mask)) {
			readmem(nht + OFFSET(neigh_table_hash_mask),
				KVADDR, &hash_mask, sizeof(hash_mask),
				"neigh_hash_table hash_mask", FAULT_ON_ERROR);
			nhash_buckets = hash_mask + 1;
		} else if (VALID_MEMBER(neigh_table_hash_shift)) {
			/* newer kernels store a shift, not a mask */
			readmem(nht + OFFSET(neigh_table_hash_shift),
				KVADDR, &hash_mask, sizeof(hash_mask),
				"neigh_hash_table hash_shift", FAULT_ON_ERROR);
			nhash_buckets = 1U << hash_mask;
		}
	} else if (VALID_MEMBER(neigh_table_hash_mask)) {
		readmem(arp_tbl + OFFSET(neigh_table_hash_mask),
			KVADDR, &hash_mask, sizeof(hash_mask),
			"neigh_table hash_mask", FAULT_ON_ERROR);
		nhash_buckets = hash_mask + 1;
	} else
		/* oldest layout: fixed-size array embedded in neigh_table */
		nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ?
			i : get_array_length("neigh_table.hash_buckets",
				NULL, sizeof(void *));

	if (nhash_buckets == 0) {
		option_not_supported('a');
		return;
	}

	hash_bytes = nhash_buckets * sizeof(*hash_buckets);
	hash_buckets = (ulong *)GETBUF(hash_bytes);

	readmem(arp_tbl + OFFSET(neigh_table_key_len), KVADDR,
		&key_len, sizeof(key_len), "neigh_table key_len",
		FAULT_ON_ERROR);

	/* read the bucket array itself, indirecting when it is a pointer */
	if (VALID_MEMBER(neigh_table_nht_ptr)) {
		readmem(nht + OFFSET(neigh_table_hash_buckets),
			KVADDR, &hash, sizeof(hash),
			"neigh_hash_table hash_buckets ptr", FAULT_ON_ERROR);
		readmem(hash, KVADDR, hash_buckets, hash_bytes,
			"neigh_hash_table hash_buckets", FAULT_ON_ERROR);
	} else if (hash_mask) {
		readmem(arp_tbl + OFFSET(neigh_table_hash_buckets),
			KVADDR, &hash, sizeof(hash),
			"neigh_table hash_buckets pointer", FAULT_ON_ERROR);
		readmem(hash, KVADDR, hash_buckets, hash_bytes,
			"neigh_table hash_buckets", FAULT_ON_ERROR);
	} else
		readmem(arp_tbl + OFFSET(neigh_table_hash_buckets),
			KVADDR, hash_buckets, hash_bytes,
			"neigh_table hash_buckets", FAULT_ON_ERROR);

	for (i = 0; i < nhash_buckets; i++) {
		if (hash_buckets[i] != (ulong)NULL) {
			/* print the heading once, on first hit */
			if (!header_printed) {
				fprintf(fp, "%s\n", ARP_HEADING);
				header_printed = 1;
			}
			print_neighbour_q(hash_buckets[i], key_len);
		}
	}

	fflush(fp);
	FREEBUF(hash_buckets);
}

/*
 * Dump out the relevant information of a neighbour structure for the
 * ARP table.
 */
static void
print_neighbour_q(ulong addr, int key_len)
{
	int i;
	ulong dev;              /* dev address of this struct */
	unsigned char *ha_buf;  /* buffer for hardware address */
	uint ha_size;           /* size of HW address */
	uint ipaddr;            /* hold ipaddr (aka primary_key) */
	struct devinfo dinfo;
	unsigned char state;    /* state of ARP entry */
	struct in_addr in_addr;

	ha_size = (i = ARRAY_LENGTH(neighbour_ha)) ?
i : get_array_length("neighbour.ha", NULL, sizeof(char)); ha_buf = (unsigned char *)GETBUF(ha_size); while (addr) { readmem(addr + OFFSET(neighbour_primary_key), KVADDR, &ipaddr, sizeof(ipaddr), "neighbour primary_key", FAULT_ON_ERROR); readmem(addr + OFFSET(neighbour_ha), KVADDR, ha_buf, ha_size, "neighbour ha", FAULT_ON_ERROR); readmem(addr + OFFSET(neighbour_dev), KVADDR, &dev, sizeof(dev), "neighbour dev", FAULT_ON_ERROR); get_netdev_info(dev, &dinfo); readmem(addr + OFFSET(neighbour_nud_state), KVADDR, &state, sizeof(state), "neighbour nud_state", FAULT_ON_ERROR); in_addr.s_addr = ipaddr; fprintf(fp, "%-16lx %-16s", addr, inet_ntoa(in_addr)); switch (dinfo.dev_type) { case ARPHRD_ETHER: /* * Use the actual HW address size in the device struct * rather than the max size of the array (as was done * during the readmem() call above.... */ fprintf(fp, "%-10s ", "ETHER"); dump_ether_hw(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_NETROM: fprintf(fp, "%-10s ", "NETROM"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_EETHER: fprintf(fp, "%-10s ", "EETHER"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_AX25: fprintf(fp, "%-10s ", "AX25"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_PRONET: fprintf(fp, "%-10s ", "PRONET"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_CHAOS: fprintf(fp, "%-10s ", "CHAOS"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_IEEE802: fprintf(fp, "%-10s ", "IEEE802"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_ARCNET: fprintf(fp, "%-10s ", "ARCNET"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_APPLETLK: fprintf(fp, "%-10s ", "APPLETLK"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_DLCI: fprintf(fp, "%-10s ", "DLCI"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_METRICOM: fprintf(fp, "%-10s ", "METRICOM"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; default: fprintf(fp, "%-10s ", "UNKNOWN"); 
dump_hw_addr(ha_buf, dinfo.dev_addr_len);
			break;
		}

		fprintf(fp, " %-6s ", dinfo.dev_name);
		arp_state_to_flags(state);

		/* advance to the next neighbour entry in this hash chain */
		readmem(addr + OFFSET(neighbour_next), KVADDR,
			&addr, sizeof(addr), "neighbour next", FAULT_ON_ERROR);
	}

	FREEBUF(ha_buf);
}

/*
 * Read the fields of a kernel (net_)device that the dumpers above need:
 * the device name, the ARPHRD_* hardware type, and the hardware address
 * length.  Offsets and member-name strings come from the global net table.
 */
static void
get_netdev_info(ulong devaddr, struct devinfo *dip)
{
	short dev_type;

	get_device_name(devaddr, dip->dev_name);

	readmem(devaddr + net->dev_type, KVADDR, &dev_type,
		sizeof(dev_type), net->dev_type_t, FAULT_ON_ERROR);
	dip->dev_type = dev_type;

	readmem(devaddr + net->dev_addr_len, KVADDR, &dip->dev_addr_len,
		sizeof(dip->dev_addr_len), net->dev_addr_t, FAULT_ON_ERROR);
}

/*
 * Get the device name.
 *
 * Copies the name of the kernel device at devaddr into buf, handling
 * both layouts: an embedded name array (STRUCT_NET_DEVICE with a known
 * array length) and a pointer to a separately allocated string.
 */
static void
get_device_name(ulong devaddr, char *buf)
{
	ulong name_addr;

	switch (net->flags & (STRUCT_DEVICE|STRUCT_NET_DEVICE))
	{
	case STRUCT_NET_DEVICE:
		/*
		 * net_device_name_index > 0 means the name is an array
		 * embedded in the net_device structure itself.
		 */
		if (net->net_device_name_index > 0) {
			readmem(devaddr + net->dev_name, KVADDR, buf,
				net->net_device_name_index,
				net->dev_name_t, FAULT_ON_ERROR);
			return;
		}
		/* fallthrough */
	case STRUCT_DEVICE:
		/* dev_name member is a pointer to the name string */
		readmem(devaddr + net->dev_name, KVADDR, &name_addr,
			sizeof(name_addr), net->dev_name_t, FAULT_ON_ERROR);
		read_string(name_addr, buf, DEV_NAME_MAX);
		break;
	}
}

/*
 * Get the device address.
 *
 * {net_}device->ip_ptr points to in_device.
 * in_device->in_ifaddr points to in_ifaddr list.
 * in_ifaddr->ifa_address contains the address.
 * in_ifaddr->ifa_next points to the next in_ifaddr in the list (if any).
 *
 * The result is a comma-separated list of dotted-quad addresses in buf;
 * buf is empty if the device has no in_device attached.
 */
static void
get_device_address(ulong devaddr, char *buf)
{
	ulong ip_ptr, ifa_list;
	struct in_addr ifa_address;

	BZERO(buf, BUFSIZE);

	readmem(devaddr + net->dev_ip_ptr, KVADDR,
		&ip_ptr, sizeof(ulong), "ip_ptr", FAULT_ON_ERROR);

	if (!ip_ptr)
		return;

	readmem(ip_ptr + OFFSET(in_device_ifa_list), KVADDR,
		&ifa_list, sizeof(ulong), "ifa_list", FAULT_ON_ERROR);

	/* walk the in_ifaddr list, appending one address per entry */
	while (ifa_list) {
		readmem(ifa_list + OFFSET(in_ifaddr_ifa_address), KVADDR,
			&ifa_address, sizeof(struct in_addr), "ifa_address",
			FAULT_ON_ERROR);

		sprintf(&buf[strlen(buf)], "%s%s",
			strlen(buf) ? ", " : "", inet_ntoa(ifa_address));

		readmem(ifa_list + OFFSET(in_ifaddr_ifa_next), KVADDR,
			&ifa_list, sizeof(ulong), "ifa_next", FAULT_ON_ERROR);
	}
}

/*
 * Get the family, type, local and destination address/port pairs.
 *
 * Formats a one-line summary of the kernel sock at address "sock" into
 * buf: "FAMILY:TYPE" followed, for AF_INET/AF_INET6 sockets, by the
 * source and destination address-port pairs.  The SOCK_V1/SOCK_V2 flag
 * selects between the 2.4-era sock layout and the 2.6 inet_sock layout.
 */
static void
get_sock_info(ulong sock, char *buf)
{
	uint32_t daddr, rcv_saddr;
	uint16_t dport, sport;
	ushort family, type;
	ushort num ATTRIBUTE_UNUSED;
	char *sockbuf, *inet_sockbuf;
	ulong ipv6_pinfo, ipv6_rcv_saddr, ipv6_daddr;
	uint16_t u6_addr16_src[8];
	uint16_t u6_addr16_dest[8];
	char buf2[BUFSIZE];
	struct in_addr in_addr;
	int len;

	BZERO(buf, BUFSIZE);
	BZERO(buf2, BUFSIZE);
	sockbuf = inet_sockbuf = NULL;
	rcv_saddr = daddr = 0;
	dport = sport = 0;
	family = type = 0;
	ipv6_pinfo = 0;

	switch (net->flags & (SOCK_V1|SOCK_V2))
	{
	case SOCK_V1:
		/* 2.4 layout: everything lives directly in struct sock */
		sockbuf = GETBUF(SIZE(sock));
		readmem(sock, KVADDR, sockbuf, SIZE(sock),
			"sock buffer", FAULT_ON_ERROR);
		daddr = UINT(sockbuf + OFFSET(sock_daddr));
		rcv_saddr = UINT(sockbuf + OFFSET(sock_rcv_saddr));
		dport = USHORT(sockbuf + OFFSET(sock_dport));
		sport = USHORT(sockbuf + OFFSET(sock_sport));
		num = USHORT(sockbuf + OFFSET(sock_num));
		family = USHORT(sockbuf + OFFSET(sock_family));
		type = USHORT(sockbuf + OFFSET(sock_type));
		break;
	case SOCK_V2:
		/* 2.6 layout: inet data embedded in struct inet_sock */
		inet_sockbuf = GETBUF(SIZE(inet_sock));
		readmem(sock, KVADDR, inet_sockbuf, SIZE(inet_sock),
			"inet_sock buffer", FAULT_ON_ERROR);
		daddr = UINT(inet_sockbuf + OFFSET(inet_sock_inet) +
			OFFSET(inet_opt_daddr));
		rcv_saddr = UINT(inet_sockbuf + OFFSET(inet_sock_inet) +
			OFFSET(inet_opt_rcv_saddr));
		dport = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) +
			OFFSET(inet_opt_dport));
		sport = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) +
			OFFSET(inet_opt_sport));
		num = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) +
			OFFSET(inet_opt_num));
		family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family));
		type = USHORT(inet_sockbuf + OFFSET(sock_sk_type));
		/*
		 * NOTE(review): the ipv6_pinfo pointer is read from the word
		 * immediately following struct sock inside inet_sock --
		 * confirm this matches the kernel's pinet6 placement.
		 */
		ipv6_pinfo = ULONG(inet_sockbuf + SIZE(sock));
		break;
	}

	/* address family tag */
	switch (family)
	{
	case AF_UNSPEC:
		sprintf(buf, "UNSPEC:");
		break;
	case AF_UNIX:
		sprintf(buf, "UNIX:");
		break;
	case AF_INET:
		sprintf(buf, "INET:");
		break;
	case AF_AX25:
		sprintf(buf, "AX25:");
		break;
	case AF_IPX:
		sprintf(buf, "IPX:");
		break;
	case AF_APPLETALK:
		sprintf(buf, "APPLETALK:");
		break;
	case AF_NETROM:
		sprintf(buf, "NETROM:");
		break;
	case AF_BRIDGE:
		sprintf(buf, "BRIDGE:");
		break;
	case AF_ATMPVC:
		sprintf(buf, "ATMPVC:");
		break;
	case AF_X25:
		sprintf(buf, "X25:");
		break;
	case AF_INET6:
		sprintf(buf, "INET6:");
		break;
	case AF_ROSE:
		sprintf(buf, "ROSE:");
		break;
	case AF_DECnet:
		sprintf(buf, "DECnet:");
		break;
	case AF_NETBEUI:
		sprintf(buf, "NETBEUI:");
		break;
	case AF_SECURITY:
		sprintf(buf, "SECURITY/KEY:");
		break;
	case AF_NETLINK:
		sprintf(buf, "NETLINK/ROUTE:");
		break;
	case AF_PACKET:
		sprintf(buf, "PACKET:");
		break;
	case AF_ASH:
		sprintf(buf, "ASH:");
		break;
	case AF_ECONET:
		sprintf(buf, "ECONET:");
		break;
	case AF_ATMSVC:
		sprintf(buf, "ATMSVC:");
		break;
	case AF_SNA:
		sprintf(buf, "SNA:");
		break;
	case AF_IRDA:
		sprintf(buf, "IRDA:");
		break;
/* older system headers may not define AF_PPPOX */
#ifndef AF_PPPOX
#define AF_PPPOX 24
#endif
	case AF_PPPOX:
		sprintf(buf, "PPPOX:");
		break;
	default:
		sprintf(buf, "%d:", family);
		break;
	}

	/* socket type tag */
	switch (type)
	{
	case SOCK_STREAM:
		sprintf(&buf[strlen(buf)], "STREAM");
		break;
	case SOCK_DGRAM:
		sprintf(&buf[strlen(buf)], "DGRAM ");
		break;
	case SOCK_RAW:
		sprintf(&buf[strlen(buf)], "RAW");
		break;
	case SOCK_RDM:
		sprintf(&buf[strlen(buf)], "RDM");
		break;
	case SOCK_SEQPACKET:
		sprintf(&buf[strlen(buf)], "SEQPACKET");
		break;
	case SOCK_PACKET:
		sprintf(&buf[strlen(buf)], "PACKET");
		break;
	default:
		sprintf(&buf[strlen(buf)], "%d", type);
		break;
	}

	/* make sure we have room at the end... */
//	sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1));
	sprintf(&buf[strlen(buf)], " ");

	if (family == AF_INET) {
		if (BITS32()) {
			in_addr.s_addr = rcv_saddr;
			sprintf(&buf[strlen(buf)], "%*s-%-*d%s",
				BYTES_IP_ADDR, inet_ntoa(in_addr),
				BYTES_PORT_NUM, ntohs(sport), space(1));
			in_addr.s_addr = daddr;
			sprintf(&buf[strlen(buf)], "%*s-%-*d%s",
				BYTES_IP_ADDR, inet_ntoa(in_addr),
				BYTES_PORT_NUM, ntohs(dport), space(1));
		} else {
			in_addr.s_addr = rcv_saddr;
			sprintf(&buf[strlen(buf)], " %s-%d ",
				inet_ntoa(in_addr), ntohs(sport));
			in_addr.s_addr = daddr;
			sprintf(&buf[strlen(buf)], "%s-%d",
				inet_ntoa(in_addr), ntohs(dport));
		}
	}

	if (sockbuf)
		FREEBUF(sockbuf);
	if (inet_sockbuf)
		FREEBUF(inet_sockbuf);

	if (family != AF_INET6)
		return;

	/* AF_INET6: append source/destination from ipv6_pinfo (2.6 only) */
	switch (net->flags & (SOCK_V1|SOCK_V2))
	{
	case SOCK_V1:
		break;
	case SOCK_V2:
		if (INVALID_MEMBER(ipv6_pinfo_rcv_saddr) ||
		    INVALID_MEMBER(ipv6_pinfo_daddr))
			break;

		ipv6_rcv_saddr = ipv6_pinfo + OFFSET(ipv6_pinfo_rcv_saddr);
		ipv6_daddr = ipv6_pinfo + OFFSET(ipv6_pinfo_daddr);

		/* these reads are best-effort: bail quietly on failure */
		if (!readmem(ipv6_rcv_saddr, KVADDR, u6_addr16_src,
		    SIZE(in6_addr), "ipv6_rcv_saddr buffer",
		    QUIET|RETURN_ON_ERROR))
			break;
		if (!readmem(ipv6_daddr, KVADDR, u6_addr16_dest,
		    SIZE(in6_addr), "ipv6_daddr buffer",
		    QUIET|RETURN_ON_ERROR))
			break;

		sprintf(&buf[strlen(buf)], "%*s ", BITS32() ? 22 : 12,
			dump_in6_addr_port(u6_addr16_src, sport, buf2, &len));
		if (BITS32() && (len > 22))
			len = 1;
		mkstring(dump_in6_addr_port(u6_addr16_dest, dport, buf2, NULL),
			len, CENTER, NULL);
		sprintf(&buf[strlen(buf)], "%s", buf2);
		break;
	}
}

/*
 * Format an IPv6 address (eight network-order 16-bit groups) plus a
 * port as "x:x:x:x:x:x:x:x-port" into buf.  If len is non-NULL it is
 * set to the resulting string length.  Returns buf.
 */
static char *
dump_in6_addr_port(uint16_t *addr, uint16_t port, char *buf, int *len)
{
	sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d",
		ntohs(addr[0]), ntohs(addr[1]), ntohs(addr[2]),
		ntohs(addr[3]), ntohs(addr[4]), ntohs(addr[5]),
		ntohs(addr[6]), ntohs(addr[7]), ntohs(port));

	if (len)
		*len = strlen(buf);

	return buf;
}

/*
 * XXX - copied from neighbour.h !!!!!!
 *
 * Neighbor Cache Entry States.
*/
#define NUD_INCOMPLETE 0x01
#define NUD_REACHABLE 0x02
#define NUD_STALE 0x04
#define NUD_DELAY 0x08
#define NUD_PROBE 0x10
#define NUD_FAILED 0x20
#define NUD_NOARP 0x40
#define NUD_PERMANENT 0x80

#define FLAGBUF_SIZE 100

/*
 * Append flag name "s" to the local flag_buffer (captured by name),
 * prefixed with '|' if the buffer is non-empty.  Silently drops the
 * name if it would not fit within FLAGBUF_SIZE-2.
 */
#define FILLBUF(s) \
do { \
	char *bp; \
	int blen; \
	blen=strlen(flag_buffer); \
	if ((blen + strlen(s)) < FLAGBUF_SIZE-2) { \
		bp = &flag_buffer[blen]; \
		if (blen != 0) { \
			sprintf(bp, "|%s", (s)); \
		} else { \
			sprintf(bp, "%s", (s)); \
		} \
	} \
} while(0)

/*
 * Take the state of the ARP entry and print it out the flag associated
 * with the binary state...
 *
 * Writes a '|'-separated list of NUD_* flag names (plus a newline) to
 * fp; a zero state prints just a newline.
 */
static void
arp_state_to_flags(unsigned char state)
{
	char flag_buffer[FLAGBUF_SIZE];
	int had_flags = 0;

	if (!state) {
		fprintf(fp, "\n");
		return;
	}

	bzero(flag_buffer, FLAGBUF_SIZE);

	if (state & NUD_INCOMPLETE) {
		FILLBUF("INCOMPLETE");
		had_flags = 1;
	}
	if (state & NUD_REACHABLE) {
		FILLBUF("REACHABLE");
		had_flags = 1;
	}
	if (state & NUD_STALE) {
		FILLBUF("STALE");
		had_flags = 1;
	}
	if (state & NUD_DELAY) {
		FILLBUF("DELAY");
		had_flags = 1;
	}
	if (state & NUD_PROBE) {
		FILLBUF("PROBE");
		had_flags = 1;
	}
	if (state & NUD_FAILED) {
		FILLBUF("FAILED");
		had_flags = 1;
	}
	if (state & NUD_NOARP) {
		FILLBUF("NOARP");
		had_flags = 1;
	}
	if (state & NUD_PERMANENT) {
		FILLBUF("PERMANENT");
		had_flags = 1;
	}

	if (had_flags) {
		fprintf(fp, "%s\n", flag_buffer);
		/* fprintf(fp, "%29.29s%s)\n", " ", flag_buffer); */
	}
}
#undef FILLBUF

/*
 * Print out a formatted ethernet HW address....
 * Bytes are colon-separated; the last byte is followed by a space.
 */
static void
dump_ether_hw(unsigned char *ha, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		char sep = ':';
		if (i == (len - 1)) {
			sep = ' ';
		}
		fprintf(fp, "%02x%c", ha[i], sep);
	}
}

/*
 * Catchall routine for dumping out a HA address whose format we
 * don't know about...
*/
static void
dump_hw_addr(unsigned char *ha, int len)
{
	int i;

	/* plain hex dump: one space-terminated byte at a time */
	for (i = 0; i < len; i++) {
		fprintf(fp, "%02x ", ha[i]);
	}
}

/*
 * help -n output
 *
 * Dumps the contents of the global net_table for debugging: flags,
 * the member-name strings used by readmem() calls, and the cached
 * structure-member offsets.
 *
 * NOTE(review): the label strings below appear to have lost their
 * original column-alignment padding during extraction -- verify the
 * spacing against the upstream source before relying on the layout.
 */
void
dump_net_table(void)
{
	int others;

	others = 0;

	fprintf(fp, " flags: %lx (", net->flags);
	if (net->flags & NETDEV_INIT)
		fprintf(fp, "%sNETDEV_INIT", others++ ? "|" : "");
	if (net->flags & STRUCT_DEVICE)
		fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : "");
	if (net->flags & STRUCT_NET_DEVICE)
		fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : "");
	if (net->flags & NO_INET_SOCK)
		fprintf(fp, "%sNO_INET_SOCK", others++ ? "|" : "");
	if (net->flags & SOCK_V1)
		fprintf(fp, "%sSOCK_V1", others++ ? "|" : "");
	if (net->flags & SOCK_V2)
		fprintf(fp, "%sSOCK_V2", others++ ? "|" : "");
	fprintf(fp, ")\n");

	fprintf(fp, " netdevice: \"%s\"\n", net->netdevice);
	fprintf(fp, " dev_name_t: \"%s\"\n", net->dev_name_t);
	fprintf(fp, " dev_type_t: \"%s\"\n", net->dev_type_t);
	fprintf(fp, " dev_addr_t: \"%s\"\n", net->dev_addr_t);
	fprintf(fp, " dev_name: %ld\n", net->dev_name);
	fprintf(fp, " dev_next: %ld\n", net->dev_next);
	fprintf(fp, " dev_type: %ld\n", net->dev_type);
	fprintf(fp, " dev_ip_ptr: %ld\n", net->dev_ip_ptr);
	fprintf(fp, " dev_addr_len: %ld\n", net->dev_addr_len);
	fprintf(fp, "net_device_name_index: %d\n", net->net_device_name_index);
}

/*
 * Dump the open sockets for a given PID.
*/
static void
dump_sockets(ulong flag, struct reference *ref)
{
	struct task_context *tc;
	ulong value;
	int subsequent;

	/* no task/pid argument: use the current context */
	if (!args[optind]) {
		if (!NET_REFERENCE_CHECK(ref))
			print_task_header(fp, CURRENT_CONTEXT(), 0);
		dump_sockets_workhorse(CURRENT_TASK(), flag, ref);
		return;
	}

	subsequent = 0;

	/* one argument per task or pid; a pid may map to several tasks */
	while (args[optind]) {
		switch (str_to_context(args[optind], &value, &tc))
		{
		case STR_PID:
			for (tc = pid_to_context(value); tc; tc = tc->tc_next) {
				if (!NET_REFERENCE_CHECK(ref))
					print_task_header(fp, tc, subsequent++);
				dump_sockets_workhorse(tc->task, flag, ref);
			}
			break;
		case STR_TASK:
			if (!NET_REFERENCE_CHECK(ref))
				print_task_header(fp, tc, subsequent++);
			dump_sockets_workhorse(tc->task, flag, ref);
			break;
		case STR_INVALID:
			error(INFO, "%sinvalid task or pid value: %s\n",
				subsequent++ ? "\n" : "", args[optind]);
			break;
		}
		optind++;
	}
}

/*
 * Find all sockets in the designated task and call sym_socket_dump()
 * to display them.
 */
void
dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref)
{
	ulong files_struct_addr = 0, fdtable_addr = 0;
	int max_fdset = 0;
	int max_fds = 0;
	ulong open_fds_addr = 0;
	fd_set open_fds;
	ulong fd;
	ulong file;
	int i, j;
	int sockets_found = 0;
	ulong value;

	/*
	 * Steps to getting open sockets:
	 *
	 * 1) task->files (struct files_struct)
	 * 2) files->fd (struct file **)
	 * 3) cycle through from 0 to files->open_fds offset from *fd
	 *    i.e. fd[0], fd[1], fd[2] are pointers to the first three
	 *    open file descriptors. Thus, we have:
	 *    struct file *fd[0], *fd[1], *fd[2],...
	 *
	 * 4) file->f_dentry (struct dentry)
	 * 5) dentry->d_inode (struct inode)
	 * 6) S_ISSOCK(inode.mode)
	 *    Assuming it _is_ a socket:
	 * 7) inode.u (struct socket) -- offset 0xdc from inode pointer
	 */
	readmem(task + OFFSET(task_struct_files), KVADDR, &files_struct_addr,
		sizeof(void *), "task files contents", FAULT_ON_ERROR);

	if (files_struct_addr) {
		if (VALID_MEMBER(files_struct_max_fdset)) {
			/* pre-fdtable kernels: limits live in files_struct */
			readmem(files_struct_addr +
				OFFSET(files_struct_max_fdset),
				KVADDR, &max_fdset, sizeof(int),
				"files_struct max_fdset", FAULT_ON_ERROR);
			readmem(files_struct_addr +
				OFFSET(files_struct_max_fds),
				KVADDR, &max_fds, sizeof(int),
				"files_struct max_fds", FAULT_ON_ERROR);
		} else if (VALID_MEMBER(files_struct_fdt)) {
			/* fdtable kernels: limits live in files->fdt */
			readmem(files_struct_addr + OFFSET(files_struct_fdt),
				KVADDR, &fdtable_addr, sizeof(void *),
				"fdtable buffer", FAULT_ON_ERROR);
			if (VALID_MEMBER(fdtable_max_fdset))
				readmem(fdtable_addr +
					OFFSET(fdtable_max_fdset),
					KVADDR, &max_fdset, sizeof(int),
					"fdtable_struct max_fdset",
					FAULT_ON_ERROR);
			else
				max_fdset = -1;	/* no fdset limit member */
			readmem(fdtable_addr + OFFSET(fdtable_max_fds),
				KVADDR, &max_fds, sizeof(int),
				"fdtable_struct max_fds", FAULT_ON_ERROR);
		}
	}

	if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) ||
	    !files_struct_addr || (max_fdset == 0) || (max_fds == 0)) {
		if (!NET_REFERENCE_CHECK(ref))
			fprintf(fp, "No open sockets.\n");
		return;
	}

	if (VALID_MEMBER(fdtable_open_fds)){
		readmem(fdtable_addr + OFFSET(fdtable_open_fds), KVADDR,
			&open_fds_addr, sizeof(void *),
			"files_struct open_fds addr", FAULT_ON_ERROR);
		readmem(fdtable_addr + OFFSET(fdtable_fd), KVADDR, &fd,
			sizeof(void *), "files_struct fd addr",
			FAULT_ON_ERROR);
	} else {
		readmem(files_struct_addr + OFFSET(files_struct_open_fds),
			KVADDR, &open_fds_addr, sizeof(void *),
			"files_struct open_fds addr", FAULT_ON_ERROR);
		readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR,
			&fd, sizeof(void *), "files_struct fd addr",
			FAULT_ON_ERROR);
	}

	if (open_fds_addr)
		readmem(open_fds_addr, KVADDR, &open_fds, sizeof(fd_set),
			"files_struct open_fds", FAULT_ON_ERROR);

	if (!open_fds_addr || !fd) {
		if (!NET_REFERENCE_CHECK(ref))
			fprintf(fp, "No open sockets.\n");
		return;
	}

	/*
	 * Classify the reference string up front: a hex-only string (or a
	 * decimal value too large to be an fd) is matched against socket
	 * and sock addresses; a small decimal is matched against fds.
	 */
	if (NET_REFERENCE_CHECK(ref)) {
		if (IS_A_NUMBER(ref->str)) {
			if (hexadecimal_only(ref->str, 0)) {
				ref->hexval = htol(ref->str,
					FAULT_ON_ERROR, NULL);
				ref->cmdflags |= NET_REF_HEXNUM;
			} else {
				value = dtol(ref->str, FAULT_ON_ERROR, NULL);
				if (value <= MAX(max_fdset, max_fds)) {
					ref->decval = value;
					ref->cmdflags |= NET_REF_DECNUM;
				} else {
					ref->hexval = htol(ref->str,
						FAULT_ON_ERROR, NULL);
					ref->cmdflags |= NET_REF_HEXNUM;
				}
			}
		}
		ref->ref1 = task;
	}

	/* walk the open_fds bitmap one long word at a time */
	j = 0;
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (((max_fdset >= 0) && (i >= max_fdset)) ||
		    (i >= max_fds))
			break;
		set = open_fds.__fds_bits[j++];
		while (set) {
			if (set & 1) {
				readmem(fd + i*sizeof(struct file *), KVADDR,
					&file, sizeof(struct file *),
					"fd file", FAULT_ON_ERROR);
				if (file) {
					if (sym_socket_dump(file, i,
					    sockets_found, flag, ref)) {
						sockets_found++;
					}
				}
			}
			i++;
			set >>= 1;
		}
	}

	if (!sockets_found && !NET_REFERENCE_CHECK(ref))
		fprintf(fp, "No open sockets.\n");

	if (NET_REFERENCE_FOUND(ref))
		fprintf(fp, "\n");
}

/*
 * Dump a struct socket symbolically. Dave makes this _very_ easy.
 *
 * Return TRUE if we found a socket, FALSE otherwise.
 *
 * NOTE(review): the 32- and 64-bit header strings below are identical
 * here -- the original column padding was probably lost during
 * extraction; verify the spacing against the upstream source.
 */
static char *socket_hdr_32 = "FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT";
static char *socket_hdr_64 = "FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT";

static int
sym_socket_dump(ulong file,
		int fd,
		int sockets_found,
		ulong flag,
		struct reference *ref)
{
	uint16_t umode16 = 0;
	uint32_t umode32 = 0;
	uint mode = 0;
	ulong dentry = 0, inode = 0, struct_socket = 0;
	ulong sock = 0;
	char *file_buf, *dentry_buf, *inode_buf, *socket_buf;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *socket_hdr = BITS32() ? socket_hdr_32 : socket_hdr_64;
	unsigned int radix;

	file_buf = fill_file_cache(file);
	dentry = ULONG(file_buf + OFFSET(file_f_dentry));

	/* output radix for dump_struct(): -d forces 10, -x forces 16 */
	if (flag & d_FLAG)
		radix = 10;
	else if (flag & x_FLAG)
		radix = 16;
	else
		radix = 0;

	if (!dentry)
		return FALSE;

	dentry_buf = fill_dentry_cache(dentry);
	inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));

	if (!inode)
		return FALSE;

	inode_buf = fill_inode_cache(inode);

	/* umode_t is 16 bits on some kernels, 32 on others */
	switch (SIZE(umode_t))
	{
	case SIZEOF_32BIT:
		umode32 = UINT(inode_buf + OFFSET(inode_i_mode));
		break;
	case SIZEOF_16BIT:
		umode16 = USHORT(inode_buf + OFFSET(inode_i_mode));
		break;
	}

	if (SIZE(umode_t) == SIZEOF_32BIT)
		mode = umode32;
	else
		mode = (uint)umode16;

	if (!S_ISSOCK(mode))
		return FALSE;

	/*
	 * 2.6 (SOCK_V2) -- socket is inode addr minus sizeof(struct socket)
	 */
	switch (net->flags & (SOCK_V1|SOCK_V2))
	{
	case SOCK_V1:
		struct_socket = inode + OFFSET(inode_u);
		sock = ULONG(inode_buf + OFFSET(inode_u) + OFFSET(socket_sk));
		break;
	case SOCK_V2:
		if (!VALID_SIZE(inet_sock))
			error(FATAL,
			    "cannot determine what an inet_sock structure is\n");
		struct_socket = inode - OFFSET(socket_alloc_vfs_inode);
		socket_buf = GETBUF(SIZE(socket));
		readmem(struct_socket, KVADDR, socket_buf, SIZE(socket),
			"socket buffer", FAULT_ON_ERROR);
		sock = ULONG(socket_buf + OFFSET(socket_sk));
		FREEBUF(socket_buf);
		break;
	}

	/*
	 * Reference mode: only display this socket if its fd, socket
	 * address, or sock address matches the reference value.
	 */
	if (NET_REFERENCE_CHECK(ref)) {
		if ((ref->cmdflags & NET_REF_HEXNUM) &&
		    ((ref->hexval == sock) ||
		     (ref->hexval == struct_socket)))
			ref->cmdflags |= NET_REF_FOUND_ITEM;
		else if ((ref->cmdflags & NET_REF_DECNUM) &&
		    (ref->decval == (ulong)fd))
			ref->cmdflags |= NET_REF_FOUND_ITEM;
		else if ((ref->cmdflags & NET_REF_HEXNUM) &&
		    (ref->hexval == (ulong)fd))
			ref->cmdflags |= NET_REF_FOUND_ITEM;

		if (!(ref->cmdflags & NET_REF_FOUND_ITEM))
			return FALSE;

		ref->cmdflags &= ~NET_REF_FOUND_ITEM;
		ref->cmdflags |= NET_REF_FOUND;

		if (!(ref->cmdflags & NET_TASK_HEADER_PRINTED)) {
			print_task_header(fp, task_to_context(ref->ref1), 0);
			ref->cmdflags |= NET_TASK_HEADER_PRINTED;
		}
		if (!(ref->cmdflags & NET_SOCK_HEADER_PRINTED)) {
			/* force the column header to print below */
			sockets_found = 0;
			ref->cmdflags |= NET_SOCK_HEADER_PRINTED;
		}
	}

	switch (flag & (S_FLAG|s_FLAG))
	{
	case S_FLAG:
		/* full structure dump */
		fprintf(fp, "%sFD %s %s\n",
			sockets_found ? "\n" : "",
			mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SOCKET"),
			mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "SOCK"));
		fprintf(fp, "%2d %s %s\n\n", fd,
			mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(struct_socket)),
			mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(sock)));
		dump_struct("socket", struct_socket, radix);
		switch (net->flags & (SOCK_V1|SOCK_V2))
		{
		case SOCK_V1:
			dump_struct("sock", sock, radix);
			break;
		case SOCK_V2:
			if (STRUCT_EXISTS("inet_sock") &&
			    !(net->flags & NO_INET_SOCK))
				dump_struct("inet_sock", sock, radix);
			else if (STRUCT_EXISTS("sock"))
				dump_struct("sock", sock, radix);
			else
				fprintf(fp,
				    "\nunable to display inet_sock structure\n");
			break;
		}
		break;
	case s_FLAG:
		/* one-line summary */
		if (!sockets_found) {
			fprintf(fp, "%s\n", socket_hdr);
		}
		fprintf(fp, "%2d%s%s%s%s%s", fd,
			space(MINSPACE),
			mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(struct_socket)),
			space(MINSPACE),
			mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(sock)),
			space(MINSPACE));
		buf1[0] = NULLCHAR;
		get_sock_info(sock, buf1);
		fprintf(fp, "%s\n", buf1);
		return TRUE;
	default:
		error(FATAL, "illegal flag: %lx\n", flag);
	}

	return TRUE;
}
crash-7.1.4/xen_hyper_dump_tables.c0000664000000000000000000011311312634305150016046 0ustar rootroot/*
 * xen_hyper_dump_tables.c
 *
 * Portions Copyright (C) 2006-2007 Fujitsu Limited
 * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
 *
 * Authors: Itsuro Oda
 * Fumihiko Kakuma
 *
 * This file is part of Xencrash.
 *
 * Xencrash is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
*
 * Xencrash is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xencrash; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "defs.h"

#ifdef XEN_HYPERVISOR_ARCH
#include "xen_hyper_defs.h"

static void xen_hyper_dump_xen_hyper_table(int verbose);
static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose);
static void xen_hyper_dump_xen_hyper_domain_table(int verbose);
static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose);
static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose);
static void xen_hyper_dump_xen_hyper_sched_table(int verbose);
static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct);
static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct);
static void xen_hyper_dump_mem(void *mem, ulong len, int dsz);

/*
 * Get help for a command, to dump an internal table, or the GNU public
 * license copying/warranty information.
 *
 * Xen-hypervisor variant of the "help" command handler: most options
 * dump an internal debugging table and return immediately; -o defers
 * the offset-table dump until after option parsing, and any remaining
 * arguments are looked up as command names.
 */
void
xen_hyper_cmd_help(void)
{
	int c;
	int oflag;

	oflag = 0;

	while ((c = getopt(argcnt, args, "aBbcDgHhM:mnOopszX:")) != EOF) {
		switch(c)
		{
		case 'a':
			dump_alias_data();
			return;
		case 'b':
			dump_shared_bufs();
			return;
		case 'B':
			dump_build_data();
			return;
		case 'c':
			dump_numargs_cache();
			return;
		case 'n':
		case 'D':
			dumpfile_memory(DUMPFILE_MEM_DUMP);
			return;
		case 'g':
			dump_gdb_data();
			return;
		case 'H':
			dump_hash_table(VERBOSE);
			return;
		case 'h':
			dump_hash_table(!VERBOSE);
			return;
		case 'M':
			dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL));
			return;
		case 'm':
			dump_machdep_table(0);
			return;
		case 'O':
			dump_offset_table(NULL, TRUE);
			return;
		case 'o':
			oflag = TRUE;
			break;
		case 'p':
			dump_program_context();
			return;
		case 's':
			dump_symbol_table();
			return;
		case 'X':
			/*
			 * -X takes a 3-character table selector; an
			 * uppercase first letter selects verbose output.
			 */
			if (strlen(optarg) != 3) {
				argerrs++;
				break;
			}
			if (!strncmp("Xen", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_table(VERBOSE);
			else if (!strncmp("xen", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_table(!VERBOSE);
			else if (!strncmp("Dmp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE);
			else if (!strncmp("dmp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE);
			else if (!strncmp("Dom", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_domain_table(VERBOSE);
			else if (!strncmp("dom", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_domain_table(!VERBOSE);
			else if (!strncmp("Vcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE);
			else if (!strncmp("vcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE);
			else if (!strncmp("Pcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE);
			else if (!strncmp("pcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE);
			else if (!strncmp("Sch", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_sched_table(VERBOSE);
			else if (!strncmp("sch", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_sched_table(!VERBOSE);
			else if (!strncmp("siz", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_size_table(NULL, TRUE);
			else if (!strncmp("ofs", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE);
			else {
				argerrs++;
				break;
			}
			return;
		case 'z':
			fprintf(fp, "help options:\n");
			fprintf(fp, " -a - alias data\n");
			fprintf(fp, " -b - shared buffer data\n");
			fprintf(fp, " -B - build data\n");
			fprintf(fp, " -c - numargs cache\n");
			fprintf(fp, " -M machine specific\n");
			fprintf(fp, " -m - machdep_table\n");
			fprintf(fp, " -s - symbol table data\n");
			fprintf(fp, " -o - offset_table and size_table\n");
			fprintf(fp, " -p - program_context\n");
			fprintf(fp, " -h - hash_table data\n");
			fprintf(fp, " -H - hash_table data (verbose)\n");
			fprintf(fp, " -X Xen - xen table data (verbose)\n");
			fprintf(fp, " -X xen - xen table data\n");
			fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n");
			fprintf(fp, " -X dmp - dumpinfo table data\n");
			fprintf(fp, " -X Dom - domain table data (verbose)\n");
			fprintf(fp, " -X dom - domain table data\n");
			fprintf(fp, " -X Vcp - vcpu table data (verbose)\n");
			fprintf(fp, " -X vcp - vcpu table data\n");
			fprintf(fp, " -X Pcp - pcpu table data (verbose)\n");
			fprintf(fp, " -X pcp - pcpu table data\n");
			fprintf(fp, " -X Sch - schedule table data (verbose)\n");
			fprintf(fp, " -X sch - schedule table data\n");
			fprintf(fp, " -X siz - size table data\n");
			fprintf(fp, " -X ofs - offset table data\n");
			return;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, COMPLETE_HELP);

	if (!args[optind]) {
		if (oflag)
			dump_offset_table(NULL, FALSE);
		else
			display_help_screen("");
		return;
	}

	do {
		if (oflag)
			dump_offset_table(args[optind], FALSE);
		else
			cmd_usage(args[optind], COMPLETE_HELP);
		optind++;
	} while (args[optind]);
}

/*
 * "help -x xen" output
 *
 * Dumps the global xen hypervisor table (xht); verbose mode also hex
 * dumps the cpumask and lists the online cpu index map.
 */
static void
xen_hyper_dump_xen_hyper_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	uint cpuid;
	int len, flag, i;

	len = 14;
	flag = XEN_HYPER_PRI_R;

	XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag,
		(buf, "%lu\n", xht->cpu_data_address));
	XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag,
		(buf, "%u\n", xht->cpu_curr));
	XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag,
		(buf, "%u\n", xht->max_cpus));
	XEN_HYPER_PRI(fp, len, "cores: ", buf, flag,
		(buf, "%d\n", xht->cores));
	XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag,
		(buf, "%d\n", xht->pcpus));
	XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag,
		(buf, "%d\n", xht->vcpus));
	XEN_HYPER_PRI(fp, len, "domains: ", buf, flag,
		(buf, "%d\n", xht->domains));
	XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag,
		(buf, "%lu\n", xht->sys_pages));
	XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag,
		(buf, "%d\n", xht->crashing_cpu));
	XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag,
		(buf, "%p\n", xht->crashing_vcc));
	XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag,
		(buf, "%lu\n", xht->max_page));
	XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag,
		(buf, "%lu\n", xht->total_pages));
	XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag,
		(buf, "%p\n", xht->cpumask));
	if (verbose && xht->cpumask) {
		xen_hyper_dump_mem(xht->cpumask,
			XEN_HYPER_SIZE(cpumask_t), sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag,
		(buf, "%p\n", xht->cpu_idxs));
	if (verbose) {
		for_cpu_indexes(i, cpuid)
			fprintf(fp, "%03d : %d\n", i, cpuid);
	}
}

/*
 * "help -x dmp" output
 *
 * Dumps the dumpinfo table (xhdit); verbose mode also hex dumps the
 * per-cpu context and crash-note arrays.
 */
static void
xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 25;
	flag = XEN_HYPER_PRI_R;

	XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag,
		(buf, "%u\n", xhdit->note_ver));
	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag,
		(buf, "%p\n", xhdit->context_array));
	if (verbose && xhdit->context_array) {
		xen_hyper_dump_mem((long *)xhdit->context_array,
			sizeof(struct xen_hyper_dumpinfo_context) *
			XEN_HYPER_MAX_CPUS(), sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag,
		(buf, "%p\n", xhdit->context_xen_core_array));
	if (verbose && xhdit->context_xen_core_array) {
		xen_hyper_dump_mem((long *)xhdit->context_xen_core_array,
			sizeof(struct xen_hyper_dumpinfo_context_xen_core) *
			XEN_HYPER_MAX_CPUS(), sizeof(long));
	}
	XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ",
		flag|XEN_HYPER_PRI_LF);
	XEN_HYPER_PRI(fp, len, "note: ", buf, flag,
		(buf, "%lx\n", xhdit->context_xen_info.note));
	XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag,
		(buf, "%u\n", xhdit->context_xen_info.pcpu_id));
	XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag,
		(buf, "%p\n", xhdit->context_xen_info.crash_xen_info_ptr));
	XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag,
		(buf, "%p\n", xhdit->crash_note_core_array));
	if (verbose && xhdit->crash_note_core_array) {
		xen_hyper_dump_mem((long *)xhdit->crash_note_core_array,
			xhdit->core_size * XEN_HYPER_NR_PCPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag,
		(buf, "%p\n", xhdit->crash_note_xen_core_array));
	if (verbose && xhdit->crash_note_xen_core_array) {
		xen_hyper_dump_mem(
			xhdit->crash_note_xen_core_array,
			xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag,
		(buf, "%p\n", xhdit->crash_note_xen_info_ptr));
	if (verbose && xhdit->crash_note_xen_info_ptr) {
		xen_hyper_dump_mem(
			xhdit->crash_note_xen_info_ptr,
			xhdit->xen_info_size, sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag,
		(buf, "%u\n", xhdit->xen_info_cpu));
	XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag,
		(buf, "%u\n", xhdit->note_size));
	XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag,
		(buf, "%u\n", xhdit->core_offset));
	XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag,
		(buf, "%u\n", xhdit->core_size));
	XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag,
		(buf, "%u\n", xhdit->xen_core_offset));
	XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag,
		(buf, "%u\n", xhdit->xen_core_size));
	XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag,
		(buf, "%u\n", xhdit->xen_info_offset));
	XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag,
		(buf, "%u\n", xhdit->xen_info_size));
}

/*
 * "help -x dom"
output */ static void xen_hyper_dump_xen_hyper_domain_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_domain_context *dcca; int len, flag, i; len = 22; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", xhdt->context_array)); if (verbose) { char buf1[XEN_HYPER_CMD_BUFSIZE]; int j; for (i = 0, dcca = xhdt->context_array; i < xhdt->context_array_cnt; i++, dcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n", dcca->domain)); XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag, (buf, "%d\n", dcca->domain_id)); XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag, (buf, "%x\n", dcca->tot_pages)); XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag, (buf, "%x\n", dcca->max_pages)); XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag, (buf, "%x\n", dcca->xenheap_pages)); XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, (buf, "%lx\n", dcca->shared_info)); XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n", dcca->sched_priv)); XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n", dcca->next_in_list)); XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag, (buf, "%lx\n", dcca->domain_flags)); XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag, (buf, "%lx\n", dcca->evtchn)); XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag, (buf, "%d\n", dcca->vcpu_cnt)); for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) { snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j); XEN_HYPER_PRI(fp, len, buf1, buf, flag, (buf, "%lx\n", dcca->vcpu[j])); } XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag, (buf, "%p\n", dcca->vcpu_context_array)); } } XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n", xhdt->context_array_cnt)); XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag, (buf, "%lu\n", xhdt->running_domains)); XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag, (buf, 
"%p\n", xhdt->dom_io)); XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag, (buf, "%p\n", xhdt->dom_xen)); XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag, (buf, "%p\n", xhdt->dom0)); XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag, (buf, "%p\n", xhdt->idle_domain)); XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag, (buf, "%p\n", xhdt->curr_domain)); XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n", xhdt->last)); XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag, (buf, "%p\n", xhdt->domain_struct)); XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag, (buf, "%p\n", xhdt->domain_struct_verify)); } /* * "help -x vcp" output */ static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag; len = 25; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag, (buf, "%p\n", xhvct->vcpu_context_arrays)); XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag, (buf, "%d\n", xhvct->vcpu_context_arrays_cnt)); if (verbose) { struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vca; int i, j; for (i = 0, vcca = xhvct->vcpu_context_arrays; i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); if (vcca->context_array) { XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", vcca->context_array)); } else { XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "NULL\n")); } XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n", vcca->context_array_cnt)); XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag, (buf, "%d\n", vcca->context_array_valid)); for (j = 0, vca = vcca->context_array; j < vcca->context_array_cnt; j++, vca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, (buf, "%lx\n", 
vca->vcpu)); XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag, (buf, "%d\n", vca->vcpu_id)); XEN_HYPER_PRI(fp, len, "processor: ", buf, flag, (buf, "%d\n", vca->processor)); XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag, (buf, "%lx\n", vca->vcpu_info)); XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n", vca->domain)); XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n", vca->next_in_list)); XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag, (buf, "%lx\n", vca->sleep_tick)); XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n", vca->sched_priv)); XEN_HYPER_PRI(fp, len, "state: ", buf, flag, (buf, "%d\n", vca->state)); XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag, (buf, "%llux\n", (unsigned long long)(vca->state_entry_time))); XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag, (buf, "%lx\n", vca->runstate_guest)); XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag, (buf, "%lx\n", vca->vcpu_flags)); } } } XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag, (buf, "%lx\n", xhvct->idle_vcpu)); XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag, (buf, "%p\n", xhvct->idle_vcpu_context_array)); XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n", xhvct->last)); XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag, (buf, "%p\n", xhvct->vcpu_struct)); XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag, (buf, "%p\n", xhvct->vcpu_struct_verify)); } /* * "help -x pcp" output */ static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_pcpu_context *pcca; int len, flag, i; #ifdef X86_64 uint64_t *ist_p; int j; #endif len = 21; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", xhpct->context_array)); if (verbose) { for (i = 0, pcca = xhpct->context_array; i < XEN_HYPER_MAX_CPUS(); i++, pcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); 
			/* Per-physical-cpu fields of the pcpu context dump
			 * (continues the verbose loop opened on the previous,
			 * unedited line). */
			XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag,
				(buf, "%lx\n", pcca->pcpu));
			XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag,
				(buf, "%u\n", pcca->processor_id));
			XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag,
				(buf, "%lx\n", pcca->guest_cpu_user_regs));
			XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag,
				(buf, "%lx\n", pcca->current_vcpu));
			XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag,
				(buf, "%lx\n", pcca->init_tss));
#ifdef X86
			XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag,
				(buf, "%x\n", pcca->sp.esp0));
#endif
#ifdef X86_64
			XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag,
				(buf, "%lx\n", pcca->sp.rsp0));
			/* x86_64 only: dump each IST (interrupt stack table) slot. */
			for (j = 0, ist_p = pcca->ist;
			j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) {
				XEN_HYPER_PRI(fp, len, "ist: ", buf, flag,
					(buf, "%lx\n", *ist_p));
			}
#endif
		}
	}
	XEN_HYPER_PRI(fp, len, "last: ", buf, flag,
		(buf, "%p\n", xhpct->last));
	XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag,
		(buf, "%p\n", xhpct->pcpu_struct));
}

/*
 * "help -x sch" output
 *
 * Dump the crash-internal xen_hyper_sched_table (xhscht): scheduler
 * identity plus, when 'verbose', the per-pcpu sched contexts.
 */
static void
xen_hyper_dump_xen_hyper_sched_table(int verbose)
{
	struct xen_hyper_sched_context *schc;
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag, i;

	len = 21;
	flag = XEN_HYPER_PRI_R;

	XEN_HYPER_PRI(fp, len, "name: ", buf, flag,
		(buf, "%s\n", xhscht->name));
	XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag,
		(buf, "%s\n", xhscht->opt_sched));
	XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag,
		(buf, "%d\n", xhscht->sched_id));
	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag,
		(buf, "%lx\n", xhscht->scheduler));
	XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag,
		(buf, "%p\n", xhscht->scheduler_struct));
	XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag,
		(buf, "%p\n", xhscht->sched_context_array));
	if (verbose) {
		/* One sched context per physical cpu (xht->pcpus entries). */
		for (i = 0, schc = xhscht->sched_context_array;
		i < xht->pcpus; i++, schc++) {
			XEN_HYPER_PRI(fp, len, "sched_context_array[", buf, flag,
				(buf, "%d]\n", i));
			XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag,
				(buf, "%lx\n", schc->schedule_data));
			XEN_HYPER_PRI(fp, len, "curr: ", buf, flag,
				(buf, "%lx\n", schc->curr));
			XEN_HYPER_PRI(fp, len, "idle: ", buf, flag,
				(buf, "%lx\n", schc->idle));
			XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag,
				(buf, "%lx\n", schc->sched_priv));
			XEN_HYPER_PRI(fp, len, "tick: ", buf, flag,
				(buf, "%lx\n", schc->tick));
		}
	}
}

/*
 * "help -x siz" output
 *
 * Dump every entry of xen_hyper_size_table: the cached sizeof() values
 * of hypervisor structures resolved from the debuginfo.  'spec' and
 * 'makestruct' mirror the generic dump_size_table() interface but are
 * unused here.
 */
static void
xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 23;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.ELF_Prstatus));
	XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.ELF_Signifo));
	XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.ELF_Gregset));
	XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.ELF_Timeval));
	XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.arch_domain));
	XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.arch_shared_info));
	XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpu_info));
	XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpu_time));
	XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpu_user_regs));
	XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpumask_t));
	XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpuinfo_ia64));
	XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.cpuinfo_x86));
	XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_note_t));
	XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_note_core_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_size_table.crash_note_xen_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_note_xen_core_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_note_xen_info_t));
	XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_xen_core_t));
	XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.crash_xen_info_t));
	XEN_HYPER_PRI(fp, len, "domain: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.domain));
#ifdef IA64
	XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.mm_struct));
#endif
	XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.note_buf_t));
	XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.schedule_data));
	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.scheduler));
	XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.shared_info));
	XEN_HYPER_PRI(fp, len, "timer: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.timer));
	XEN_HYPER_PRI(fp, len, "tss_struct: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.tss_struct));
	XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.vcpu));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.vcpu_runstate_info));
	XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag,
		(buf, "%ld\n", xen_hyper_size_table.xen_crash_xen_regs_t));
}

/*
 * "help -x ofs" output
 *
 * Dump every entry of xen_hyper_offset_table: the cached member
 * offsets of hypervisor structures resolved from the debuginfo.
 * 'spec' and 'makestruct' mirror the generic dump interface but are
 * unused here.
 */
static void
xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 45;
	flag = XEN_HYPER_PRI_R;
	/* ELF_Prstatus members */
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg));
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid));
	XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec));
	XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec));
#ifdef IA64
	XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm));
#endif
	/* arch_shared_info / cpu_info / cpu_time members */
	XEN_HYPER_PRI(fp, len, "arch_shared_info_max_pfn: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn));
	XEN_HYPER_PRI(fp, len,
		"arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list));
	XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason));
	XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs));
	XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id));
	XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu));
	XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp));
	XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp));
	XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp));
	XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale));
	XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer));
	/* crash note members */
	XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core));
	XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen));
	XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs));
	XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info));
	XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_note));
	XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.crash_note_core_t_desc));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc));
	/* struct domain members */
	XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_page_list));
	XEN_HYPER_PRI(fp, len, "domain_xenpage_list: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list));
	XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_domain_id));
	XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages));
	XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_max_pages));
	XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages));
	XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_shared_info));
	XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv));
	XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_next_in_list));
	XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.domain_domain_flags));
	XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_evtchn));
	XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm));
	XEN_HYPER_PRI(fp, len, "domain_guest_type: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_guest_type));
	XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged));
	XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached));
	/* domain_is_polling only exists on some hypervisor versions. */
	if (XEN_HYPER_VALID_MEMBER(domain_is_polling)) {
		XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag,
			(buf, "%ld\n", xen_hyper_offset_table.domain_is_polling));
	}
	XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_is_dying));
	/* Only one of next both exists but print both, ones value is -1. */
	XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.domain_is_paused_by_controller));
	XEN_HYPER_PRI(fp, len, "domain_controller_pause_count: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.domain_controller_pause_count));
	XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down));
	XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down));
	XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_vcpu));
	XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.domain_arch));
#ifdef IA64
	XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd));
#endif
	/* schedule_data / scheduler members */
	XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.schedule_data_schedule_lock));
	XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr));
	XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle));
	XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv));
	XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer));
	XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick));
	XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_name));
	XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name));
	XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id));
	XEN_HYPER_PRI(fp, len, "scheduler_init: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_init));
	XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_tick));
	XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu));
	XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_destroy_domain));
	XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep));
	XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_wake));
	XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity));
	XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule));
	XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust));
	XEN_HYPER_PRI(fp, len, "scheduler_dump_settings: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings));
	XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state));
	/* shared_info / timer / tss members */
	XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info));
	XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending));
	XEN_HYPER_PRI(fp, len, "shared_info_evtchn_mask: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask));
	XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.shared_info_arch));
	XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_expires));
	XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_cpu));
	XEN_HYPER_PRI(fp, len, "timer_function: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_function));
	XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_data));
	XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset));
	XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.timer_killed));
	XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.tss_struct_rsp0));
	XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.tss_struct_esp0));
	/* struct vcpu members */
	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id));
	XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_processor));
	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info));
	XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf,
		flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_domain));
	XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list));
	XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_timer));
	XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick));
	XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer));
	XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest));
	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags));
	XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count));
	XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn));
	XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity));
	XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_nmi_addr));
	XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask));
	XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_arch));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf,
		flag, (buf, "%ld\n",
		xen_hyper_offset_table.vcpu_runstate_info_state_entry_time));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag,
		(buf, "%ld\n",
		xen_hyper_offset_table.vcpu_runstate_info_time));
#ifdef IA64
	XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, flag,
		(buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp));
#endif
}

/*
 * dump specified memory with specified size.
 */
#define DSP_BYTE_SIZE 16	/* bytes shown per output row */

/*
 * Hex-dump 'len' bytes starting at 'mem' to the crash output stream 'fp',
 * in units of 'dsz' bytes (must be one of SIZEOF_8BIT/16BIT/32BIT/64BIT;
 * anything else, or a zero length, is silently ignored).  Each row is
 * prefixed with the current buffer address.
 */
static void
xen_hyper_dump_mem(void *mem, ulong len, int dsz)
{
	long i, max;
	void *mem_w = mem;	/* walking pointer through the buffer */

	if (!len ||
	(dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT &&
	dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT))
		return;
	/* round partial trailing units up to one more element */
	max = len / dsz + (len % dsz ? 1 : 0);
	for (i = 0; i < max; i++) {
		/* start a new row every DSP_BYTE_SIZE bytes */
		if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz)))
			fprintf(fp, "\n");
		if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz)))
			fprintf(fp, "%p : ", mem_w);
		if (dsz == SIZEOF_8BIT)
			fprintf(fp, "%02x ", *(uint8_t *)mem_w);
		else if (dsz == SIZEOF_16BIT)
			fprintf(fp, "%04x ", *(uint16_t *)mem_w);
		else if (dsz == SIZEOF_32BIT)
			fprintf(fp, "%08x ", *(uint32_t *)mem_w);
		else if (dsz == SIZEOF_64BIT)
			fprintf(fp, "%016llx ", *(unsigned long long *)mem_w);
		mem_w = (char *)mem_w + dsz;
	}
	fprintf(fp, "\n");
}
#endif
crash-7.1.4/test.c0000775000000000000000000000475512634305150012461 0ustar rootroot/* test.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2011 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005, 2011 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "defs.h"
#include
/* NOTE(review): the header name after the second #include was lost during
 * archive extraction (getopt_long() below suggests <getopt.h>) — confirm
 * against the pristine test.c before building. */

/* long options recognized by the scratch "test" command */
static struct option test_long_options[] = {
	{"no", no_argument, 0, 0},
	{"req", required_argument, 0, 0},
	{0, 0, 0, 0}
};

/*
 * Test your stuff here first if you'd like. If anything's being done
 * below in this routine, consider it leftover trash...
 */
void
cmd_test(void)
{
	int c;
	int option_index;

	while ((c = getopt_long(argcnt, args, "",
	        test_long_options, &option_index)) != EOF) {
		switch(c)
		{
		case 0:
			/* both long options simply echo what they received */
			if (STREQ(test_long_options[option_index].name, "no"))
				fprintf(fp, "no argument\n");
			if (STREQ(test_long_options[option_index].name, "req"))
				fprintf(fp, "required argument: %s\n", optarg);
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* consume (and ignore) any remaining non-option arguments */
	while (args[optind]) {
		;
		optind++;
	}
}

/*
 * Scratch routine for testing a feature on a per-task basis by entering
 * the "foreach test" command. Like cmd_test(), anything that's being done
 * below in this routine can be considered trash.
 */
void
foreach_test(ulong task, ulong flags)
{

}

/*
 * Template for building a new command.
 */
void
cmd_template(void)
{
	int c;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		;
		optind++;
	}
}
crash-7.1.4/x86_64.c0000775000000000000000000071344112634305150012441 0ustar rootroot/* x86_64.c -- core analysis suite
 *
 * Copyright (C) 2004-2015 David Anderson
 * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*/ #include "defs.h" #include "xen_hyper_defs.h" #ifdef X86_64 static int x86_64_kvtop(struct task_context *, ulong, physaddr_t *, int); static int x86_64_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); static int x86_64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int x86_64_uvtop_level4(struct task_context *, ulong, physaddr_t *, int); static int x86_64_uvtop_level4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *, ulong, physaddr_t *, int); static ulong x86_64_vmalloc_start(void); static int x86_64_is_task_addr(ulong); static int x86_64_verify_symbol(const char *, ulong, char); static int x86_64_verify_line_number(ulong, ulong, ulong); static ulong x86_64_get_task_pgd(ulong); static int x86_64_translate_pte(ulong, void *, ulonglong); static ulong x86_64_processor_speed(void); static int is_vsyscall_addr(ulong); struct syment *x86_64_value_to_symbol(ulong, ulong *); static int x86_64_eframe_search(struct bt_info *); static int x86_64_eframe_verify(struct bt_info *, long, long, long, long, long, long); #define EFRAME_PRINT (0x1) #define EFRAME_VERIFY (0x2) #define EFRAME_CS (0x4) #define EFRAME_SEARCH (0x8) static int x86_64_print_eframe_location(ulong, int, FILE *); static void x86_64_back_trace_cmd(struct bt_info *); static ulong x86_64_in_exception_stack(struct bt_info *, int *); static ulong x86_64_in_irqstack(struct bt_info *); static int x86_64_in_alternate_stack(int, ulong); static ulong __schedule_frame_adjust(ulong, struct bt_info *); static void x86_64_low_budget_back_trace_cmd(struct bt_info *); static void x86_64_dwarf_back_trace_cmd(struct bt_info *); static void x86_64_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *); static struct syment *x86_64_function_called_by(ulong); static int is_direct_call_target(struct bt_info *); static void get_x86_64_frame(struct bt_info *, ulong *, ulong *); static ulong text_lock_function(char *, 
struct bt_info *, ulong); static int x86_64_print_stack_entry(struct bt_info *, FILE *, int, int, ulong); static void x86_64_display_full_frame(struct bt_info *, ulong, FILE *); static void x86_64_do_bt_reference_check(struct bt_info *, ulong,char *); static void x86_64_dump_irq(int); static void x86_64_get_irq_affinity(int); static void x86_64_show_interrupts(int, ulong *); static char *x86_64_extract_idt_function(ulong *, char *, ulong *); static ulong x86_64_get_pc(struct bt_info *); static ulong x86_64_get_sp(struct bt_info *); static void x86_64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int x86_64_dis_filter(ulong, char *, unsigned int); static void x86_64_cmd_mach(void); static int x86_64_get_smp_cpus(void); static void x86_64_display_machine_stats(void); static void x86_64_display_cpu_data(unsigned int); static void x86_64_display_memmap(void); static void x86_64_dump_line_number(ulong); static struct line_number_hook x86_64_line_number_hooks[]; static void x86_64_calc_phys_base(void); static int x86_64_is_module_addr(ulong); static int x86_64_is_kvaddr(ulong); static int x86_64_is_uvaddr(ulong, struct task_context *); void x86_64_compiler_warning_stub(void); static void x86_64_init_kernel_pgd(void); static void x86_64_cpu_pda_init(void); static void x86_64_per_cpu_init(void); static void x86_64_ist_init(void); static void x86_64_post_init(void); static void parse_cmdline_args(void); static void x86_64_clear_machdep_cache(void); static void x86_64_irq_eframe_link_init(void); static ulong x86_64_irq_eframe_link(ulong, struct bt_info *, FILE *); static ulong search_for_switch_to(ulong, ulong); static void x86_64_thread_return_init(void); static void x86_64_framepointer_init(void); static int x86_64_virt_phys_base(void); static int x86_64_xendump_p2m_create(struct xendump_data *); static int x86_64_pvops_xendump_p2m_create(struct xendump_data *); static int x86_64_pvops_xendump_p2m_l2_create(struct xendump_data *); static int 
x86_64_pvops_xendump_p2m_l3_create(struct xendump_data *); static char *x86_64_xendump_load_page(ulong, struct xendump_data *); static int x86_64_xendump_page_index(ulong, struct xendump_data *); static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *); static char *x86_64_xen_kdump_load_page(ulong, char *); static ulong x86_64_xen_kdump_page_mfn(ulong); static void x86_64_debug_dump_page(FILE *, char *, char *); static void x86_64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); static ulong x86_64_xendump_panic_task(struct xendump_data *); static void x86_64_init_hyper(int); static ulong x86_64_get_stackbase_hyper(ulong); static ulong x86_64_get_stacktop_hyper(ulong); static int x86_64_framesize_cache_resize(void); static int x86_64_framesize_cache_func(int, ulong, int *, int); static ulong x86_64_get_framepointer(struct bt_info *, ulong); int search_for_eframe_target_caller(struct bt_info *, ulong, int *); static int x86_64_get_framesize(struct bt_info *, ulong, ulong); static void x86_64_framesize_debug(struct bt_info *); static void x86_64_get_active_set(void); static int x86_64_get_kvaddr_ranges(struct vaddr_range *); static int x86_64_verify_paddr(uint64_t); static void GART_init(void); static void x86_64_exception_stacks_init(void); struct machine_specific x86_64_machine_specific = { 0 }; /* * Do all necessary machine-specific setup here. This is called several * times during initialization. 
 */

/*
 * Machine-dependent initialization for x86_64, driven by the crash
 * session state machine: called repeatedly with "when" advancing through
 * SETUP_ENV, PRE_SYMTAB, PRE_GDB, POST_GDB, POST_VM, POST_INIT (or
 * LOG_ONLY for a log-extraction-only session).  Each phase fills in the
 * relevant machdep/machspec function pointers, VM-layout constants and
 * kernel-structure offsets.
 */
void
x86_64_init(int when)
{
	int len, dim;

	/* Xen hypervisor analysis has its own init path. */
	if (XEN_HYPER_MODE()) {
		x86_64_init_hyper(when);
		return;
	}

	switch (when)
	{
	case SETUP_ENV:
		machdep->process_elf_notes = x86_process_elf_notes;
		break;

	case PRE_SYMTAB:
		machdep->verify_symbol = x86_64_verify_symbol;
		machdep->verify_line_number = x86_64_verify_line_number;
		machdep->machspec = &x86_64_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 2;
		/* One page-table-cache page per paging level. */
		if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc upml space.");
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		if ((machdep->machspec->pml4 = (char *)malloc(PAGESIZE()*2)) == NULL)
			error(FATAL, "cannot malloc pml4 space.");
		machdep->machspec->last_upml_read = 0;
		machdep->machspec->last_pml4_read = 0;
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = x86_64_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		machdep->flags |= MACHDEP_BT_TEXT;
		machdep->flags |= FRAMESIZE_DEBUG;
		machdep->machspec->irq_eframe_link = UNINITIALIZED;
		machdep->get_kvaddr_ranges = x86_64_get_kvaddr_ranges;
		if (machdep->cmdline_args[0])
			parse_cmdline_args();
		break;

	case PRE_GDB:
		/*
		 * Select the kernel's virtual memory layout from symbols,
		 * unless it was already forced via command-line args.
		 */
		if (!(machdep->flags & VM_FLAGS)) {
			if (symbol_exists("xen_start_info")) {
				if (PVOPS())
					machdep->flags |= VM_2_6_11;
				else if (symbol_exists("low_pml4") &&
				    symbol_exists("swap_low_mappings"))
					machdep->flags |= VM_XEN_RHEL4;
				else
					machdep->flags |= VM_XEN;
			} else if (symbol_exists("boot_vmalloc_pgt"))
				machdep->flags |= VM_ORIG;
			else
				machdep->flags |= VM_2_6_11;
		}

		switch (machdep->flags & VM_FLAGS)
		{
		case VM_ORIG:
			/* pre-2.6.11 layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_ORIG;
			machdep->machspec->page_offset = PAGE_OFFSET_ORIG;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_ORIG;
			machdep->machspec->vmalloc_end = VMALLOC_END_ORIG;
			machdep->machspec->modules_vaddr = MODULES_VADDR_ORIG;
			machdep->machspec->modules_end = MODULES_END_ORIG;
			/* 3-level paging: the extra upml cache is not needed. */
			free(machdep->machspec->upml);
			machdep->machspec->upml = NULL;
			machdep->uvtop = x86_64_uvtop;
			break;

		case VM_2_6_11:
			/* 2.6.11 layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_2_6_11;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_11;
			machdep->machspec->vmalloc_end = VMALLOC_END_2_6_11;
			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_11;
			machdep->machspec->modules_end = MODULES_END_2_6_11;
			/* 2.6.24 layout */
			machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_24;
			machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_24;
			if (symbol_exists("vmemmap_populate"))
				machdep->flags |= VMEMMAP;
			if (kernel_symbol_exists("end_pfn"))
				/* 2.6.11 layout */
				machdep->machspec->page_offset = PAGE_OFFSET_2_6_11;
			else
				/* 2.6.27 layout */
				machdep->machspec->page_offset = PAGE_OFFSET_2_6_27;
			machdep->uvtop = x86_64_uvtop_level4;
			break;

		case VM_XEN:
			/* Xen layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_XEN;
			machdep->machspec->page_offset = PAGE_OFFSET_XEN;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN;
			machdep->machspec->vmalloc_end = VMALLOC_END_XEN;
			machdep->machspec->modules_vaddr = MODULES_VADDR_XEN;
			machdep->machspec->modules_end = MODULES_END_XEN;
			break;

		case VM_XEN_RHEL4:
			/* RHEL4 Xen layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_XEN_RHEL4;
			machdep->machspec->page_offset = PAGE_OFFSET_XEN_RHEL4;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN_RHEL4;
			machdep->machspec->vmalloc_end = VMALLOC_END_XEN_RHEL4;
			machdep->machspec->modules_vaddr = MODULES_VADDR_XEN_RHEL4;
			machdep->machspec->modules_end = MODULES_END_XEN_RHEL4;
			break;
		}
		machdep->kvbase = (ulong)PAGE_OFFSET;
		machdep->identity_map_base = (ulong)PAGE_OFFSET;
		machdep->is_kvaddr = x86_64_is_kvaddr;
		machdep->is_uvaddr = x86_64_is_uvaddr;
		machdep->eframe_search = x86_64_eframe_search;
		machdep->back_trace = x86_64_low_budget_back_trace_cmd;
		machdep->processor_speed = x86_64_processor_speed;
		machdep->kvtop = x86_64_kvtop;
		machdep->get_task_pgd = x86_64_get_task_pgd;
		machdep->get_stack_frame = x86_64_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = x86_64_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->is_task_addr = x86_64_is_task_addr;
		machdep->dis_filter = x86_64_dis_filter;
		machdep->cmd_mach = x86_64_cmd_mach;
		machdep->get_smp_cpus = x86_64_get_smp_cpus;
		machdep->value_to_symbol = x86_64_value_to_symbol;
		machdep->init_kernel_pgd = x86_64_init_kernel_pgd;
		machdep->clear_machdep_cache = x86_64_clear_machdep_cache;
		machdep->xendump_p2m_create = x86_64_xendump_p2m_create;
		machdep->get_xendump_regs = x86_64_get_xendump_regs;
		machdep->xen_kdump_p2m_create = x86_64_xen_kdump_p2m_create;
		machdep->xendump_panic_task = x86_64_xendump_panic_task;
		if (symbol_exists("vgettimeofday"))
			machdep->machspec->vsyscall_page =
			    PAGEBASE(symbol_value("vgettimeofday"));
		x86_64_calc_phys_base();
		break;

	case POST_GDB:
		/* Version-specific adjustments to the VM layout constants. */
		if (THIS_KERNEL_VERSION >= LINUX(2,6,26) &&
		    THIS_KERNEL_VERSION < LINUX(2,6,31)) {
			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_26;
		}
		if (THIS_KERNEL_VERSION >= LINUX(2,6,27) &&
		    THIS_KERNEL_VERSION < LINUX(2,6,31)) {
			machdep->machspec->modules_end = MODULES_END_2_6_27;
		}
		if (THIS_KERNEL_VERSION >= LINUX(2,6,31)) {
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_31;
			machdep->machspec->vmalloc_end = VMALLOC_END_2_6_31;
			machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_31;
			machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_31;
			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_31;
			machdep->machspec->modules_end = MODULES_END_2_6_31;
		}
		STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
		/*
		 * Before 2.6.25 the structure was called gate_struct
		 */
		if (STRUCT_EXISTS("gate_desc"))
			STRUCT_SIZE_INIT(gate_struct, "gate_desc");
		else
			STRUCT_SIZE_INIT(gate_struct, "gate_struct");
		STRUCT_SIZE_INIT(e820map, "e820map");
		STRUCT_SIZE_INIT(e820entry, "e820entry");
		MEMBER_OFFSET_INIT(e820map_nr_map, "e820map", "nr_map");
		MEMBER_OFFSET_INIT(e820entry_addr, "e820entry", "addr");
		MEMBER_OFFSET_INIT(e820entry_size, "e820entry", "size");
		MEMBER_OFFSET_INIT(e820entry_type, "e820entry", "type");
		if (KVMDUMP_DUMPFILE())
			set_kvm_iohole(NULL);
		/*
		 * thread_struct members were renamed rip/rsp/rsp0 ->
		 * ip/sp/sp0; probe the old name first, then fall back.
		 */
		MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "rip");
		MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "rsp");
		MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "rsp0");
		if (INVALID_MEMBER(thread_struct_rip))
			MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "ip");
		if (INVALID_MEMBER(thread_struct_rsp))
			MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "sp");
		if (INVALID_MEMBER(thread_struct_rsp0))
			MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "sp0");
		STRUCT_SIZE_INIT(tss_struct, "tss_struct");
		MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist");
		if (INVALID_MEMBER(tss_struct_ist)) {
			/* Newer kernels nest the ist array in x86_hw_tss. */
			long x86_tss_offset, ist_offset;
			x86_tss_offset = MEMBER_OFFSET("tss_struct", "x86_tss");
			ist_offset = MEMBER_OFFSET("x86_hw_tss", "ist");
			if ((x86_tss_offset != INVALID_OFFSET) &&
			    (ist_offset != INVALID_OFFSET))
				ASSIGN_OFFSET(tss_struct_ist) =
				    x86_tss_offset + ist_offset;
		}
		/*
		 * user_regs_struct members were likewise renamed (rip -> ip,
		 * rsp -> sp, eflags -> flags, rax -> ax, ...); probe old
		 * names first with per-member fallbacks.
		 */
		MEMBER_OFFSET_INIT(user_regs_struct_rip, "user_regs_struct", "rip");
		if (INVALID_MEMBER(user_regs_struct_rip))
			MEMBER_OFFSET_INIT(user_regs_struct_rip, "user_regs_struct", "ip");
		MEMBER_OFFSET_INIT(user_regs_struct_rsp, "user_regs_struct", "rsp");
		if (INVALID_MEMBER(user_regs_struct_rsp))
			MEMBER_OFFSET_INIT(user_regs_struct_rsp, "user_regs_struct", "sp");
		MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "eflags");
		if (INVALID_MEMBER(user_regs_struct_eflags))
			MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "flags");
		MEMBER_OFFSET_INIT(user_regs_struct_cs, "user_regs_struct", "cs");
		MEMBER_OFFSET_INIT(user_regs_struct_ss, "user_regs_struct", "ss");
		MEMBER_OFFSET_INIT(user_regs_struct_rax, "user_regs_struct", "rax");
		if (INVALID_MEMBER(user_regs_struct_rax))
			MEMBER_OFFSET_INIT(user_regs_struct_rax, "user_regs_struct", "ax");
		MEMBER_OFFSET_INIT(user_regs_struct_rbx, "user_regs_struct", "rbx");
		if (INVALID_MEMBER(user_regs_struct_rbx))
			MEMBER_OFFSET_INIT(user_regs_struct_rbx, "user_regs_struct", "bx");
		MEMBER_OFFSET_INIT(user_regs_struct_rcx, "user_regs_struct", "rcx");
		if (INVALID_MEMBER(user_regs_struct_rcx))
			MEMBER_OFFSET_INIT(user_regs_struct_rcx, "user_regs_struct", "cx");
		MEMBER_OFFSET_INIT(user_regs_struct_rdx, "user_regs_struct", "rdx");
		if (INVALID_MEMBER(user_regs_struct_rdx))
			MEMBER_OFFSET_INIT(user_regs_struct_rdx, "user_regs_struct", "dx");
		MEMBER_OFFSET_INIT(user_regs_struct_rsi, "user_regs_struct", "rsi");
		if (INVALID_MEMBER(user_regs_struct_rsi))
			MEMBER_OFFSET_INIT(user_regs_struct_rsi, "user_regs_struct", "si");
		MEMBER_OFFSET_INIT(user_regs_struct_rdi, "user_regs_struct", "rdi");
		if (INVALID_MEMBER(user_regs_struct_rdi))
			MEMBER_OFFSET_INIT(user_regs_struct_rdi, "user_regs_struct", "di");
		MEMBER_OFFSET_INIT(user_regs_struct_rbp, "user_regs_struct", "rbp");
		if (INVALID_MEMBER(user_regs_struct_rbp))
			MEMBER_OFFSET_INIT(user_regs_struct_rbp, "user_regs_struct", "bp");
		MEMBER_OFFSET_INIT(user_regs_struct_r8, "user_regs_struct", "r8");
		MEMBER_OFFSET_INIT(user_regs_struct_r9, "user_regs_struct", "r9");
		MEMBER_OFFSET_INIT(user_regs_struct_r10, "user_regs_struct", "r10");
		MEMBER_OFFSET_INIT(user_regs_struct_r11, "user_regs_struct", "r11");
		MEMBER_OFFSET_INIT(user_regs_struct_r12, "user_regs_struct", "r12");
		MEMBER_OFFSET_INIT(user_regs_struct_r13, "user_regs_struct", "r13");
		MEMBER_OFFSET_INIT(user_regs_struct_r14, "user_regs_struct", "r14");
		MEMBER_OFFSET_INIT(user_regs_struct_r15, "user_regs_struct", "r15");
		STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct");
		if (!VALID_STRUCT(user_regs_struct)) {
			/*  Use this hardwired version -- sometimes the
			 *  debuginfo doesn't pick this up even though
			 *  it exists in the kernel; it shouldn't change.
			 */
			struct x86_64_user_regs_struct {
				unsigned long r15, r14, r13, r12, bp, bx;
				unsigned long r11, r10, r9, r8, ax, cx, dx;
				unsigned long si, di, orig_ax, ip, cs;
				unsigned long flags, sp, ss, fs_base;
				unsigned long gs_base, ds, es, fs, gs;
			};
			ASSIGN_SIZE(user_regs_struct) =
				sizeof(struct x86_64_user_regs_struct);
			ASSIGN_OFFSET(user_regs_struct_rip) =
				offsetof(struct x86_64_user_regs_struct, ip);
			ASSIGN_OFFSET(user_regs_struct_rsp) =
				offsetof(struct x86_64_user_regs_struct, sp);
			ASSIGN_OFFSET(user_regs_struct_eflags) =
				offsetof(struct x86_64_user_regs_struct, flags);
			ASSIGN_OFFSET(user_regs_struct_cs) =
				offsetof(struct x86_64_user_regs_struct, cs);
			ASSIGN_OFFSET(user_regs_struct_ss) =
				offsetof(struct x86_64_user_regs_struct, ss);
			ASSIGN_OFFSET(user_regs_struct_rax) =
				offsetof(struct x86_64_user_regs_struct, ax);
			ASSIGN_OFFSET(user_regs_struct_rbx) =
				offsetof(struct x86_64_user_regs_struct, bx);
			ASSIGN_OFFSET(user_regs_struct_rcx) =
				offsetof(struct x86_64_user_regs_struct, cx);
			ASSIGN_OFFSET(user_regs_struct_rdx) =
				offsetof(struct x86_64_user_regs_struct, dx);
			ASSIGN_OFFSET(user_regs_struct_rsi) =
				offsetof(struct x86_64_user_regs_struct, si);
			ASSIGN_OFFSET(user_regs_struct_rdi) =
				offsetof(struct x86_64_user_regs_struct, di);
			ASSIGN_OFFSET(user_regs_struct_rbp) =
				offsetof(struct x86_64_user_regs_struct, bp);
			ASSIGN_OFFSET(user_regs_struct_r8) =
				offsetof(struct x86_64_user_regs_struct, r8);
			ASSIGN_OFFSET(user_regs_struct_r9) =
				offsetof(struct x86_64_user_regs_struct, r9);
			ASSIGN_OFFSET(user_regs_struct_r10) =
				offsetof(struct x86_64_user_regs_struct, r10);
			ASSIGN_OFFSET(user_regs_struct_r11) =
				offsetof(struct x86_64_user_regs_struct, r11);
			ASSIGN_OFFSET(user_regs_struct_r12) =
				offsetof(struct x86_64_user_regs_struct, r12);
			ASSIGN_OFFSET(user_regs_struct_r13) =
				offsetof(struct x86_64_user_regs_struct, r13);
			ASSIGN_OFFSET(user_regs_struct_r14) =
				offsetof(struct x86_64_user_regs_struct, r14);
			ASSIGN_OFFSET(user_regs_struct_r15) =
				offsetof(struct x86_64_user_regs_struct, r15);
		}
		machdep->vmalloc_start = x86_64_vmalloc_start;
		vt->vmalloc_start = machdep->vmalloc_start();
		machdep->init_kernel_pgd();
		/* x8664_pda exists in older kernels; newer ones are per-cpu. */
		if (STRUCT_EXISTS("x8664_pda"))
			x86_64_cpu_pda_init();
		else
			x86_64_per_cpu_init();
		x86_64_ist_init();
		if (symbol_exists("repeat_nmi"))
			machdep->flags |= NESTED_NMI;
		machdep->in_alternate_stack = x86_64_in_alternate_stack;
		if ((machdep->machspec->irqstack = (char *)
		    malloc(machdep->machspec->stkinfo.isize)) == NULL)
			error(FATAL, "cannot malloc irqstack space.");
		if (symbol_exists("irq_desc")) {
			if (LKCD_KERNTYPES())
				ARRAY_LENGTH_INIT_ALT(machdep->nr_irqs,
				    "irq_desc", "kernel_stat.irqs", NULL, 0);
			else
				ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				    "irq_desc", NULL, 0);
		} else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
				&machdep->nr_irqs);
		else
			machdep->nr_irqs = 224; /* NR_IRQS (at least) */
		machdep->dump_irq = x86_64_dump_irq;
		machdep->get_irq_affinity = x86_64_get_irq_affinity;
		machdep->show_interrupts = x86_64_show_interrupts;
		if (THIS_KERNEL_VERSION < LINUX(2,6,24))
			machdep->line_number_hooks = x86_64_line_number_hooks;
		if (!machdep->hz) {
			machdep->hz = HZ;
			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
				machdep->hz = 1000;
		}
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		if (!machdep->max_physmem_bits) {
			if (THIS_KERNEL_VERSION >= LINUX(2,6,31))
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_31;
			else if (THIS_KERNEL_VERSION >= LINUX(2,6,26))
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_26;
			else {
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
				len = get_array_length("mem_section", &dim, 0);
				/*
				 * Check for patched MAX_PHYSMEM_BITS.
				 */
				if (((len > 32) && !dim) ||
				    ((len > 8192) && (dim == 1)))
					machdep->max_physmem_bits =
						_MAX_PHYSMEM_BITS_2_6_26;
			}
		}
		if (XEN()) {
			if (kt->xen_flags & WRITABLE_PAGE_TABLES) {
				switch (machdep->flags & VM_FLAGS)
				{
				case VM_XEN:
				case VM_2_6_11:
					machdep->uvtop = x86_64_uvtop_level4_xen_wpt;
					break;
				case VM_XEN_RHEL4:
					machdep->uvtop = x86_64_uvtop_level4_rhel4_xen_wpt;
					break;
				}
			} else
				machdep->uvtop = x86_64_uvtop_level4;
			MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs,
				"vcpu_guest_context", "user_regs");
			/*
			 * rsp/rip immediately precede ss/cs in cpu_user_regs;
			 * derived from the neighboring members' offsets.
			 */
			ASSIGN_OFFSET(cpu_user_regs_rsp) =
				MEMBER_OFFSET("cpu_user_regs", "ss") - sizeof(ulong);
			ASSIGN_OFFSET(cpu_user_regs_rip) =
				MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong);
		}
		x86_64_irq_eframe_link_init();
		x86_64_framepointer_init();
		x86_64_thread_return_init();
		if (THIS_KERNEL_VERSION >= LINUX(2,6,28))
			machdep->machspec->page_protnone = _PAGE_GLOBAL;
		else
			machdep->machspec->page_protnone = _PAGE_PSE;
		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg");
		STRUCT_SIZE_INIT(percpu_data, "percpu_data");
		GART_init();
		break;

	case POST_VM:
		init_unwind_table();
		break;

	case POST_INIT:
		x86_64_post_init();
		x86_64_get_active_set();
		break;

	case LOG_ONLY:
		machdep->machspec = &x86_64_machine_specific;
		x86_64_calc_phys_base();
		break;
	}
}

/*
 * Dump the x86_64 machdep/machspec tables for the "mach -m"-style debug
 * display.  A non-zero "arg" widens the per-cpu arrays to NR_CPUS instead
 * of the detected cpu count.  Output formatting only; no state is changed.
 */
void
x86_64_dump_machdep_table(ulong arg)
{
	int c, i, cpus;
	int others;
	struct machine_specific *ms;

	ms = machdep->machspec;

	others = 0;
	fprintf(fp, " flags: %lx (", machdep->flags);
	if (machdep->flags & KSYMS_START)
		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
	if (machdep->flags & PT_REGS_INIT)
		fprintf(fp, "%sPT_REGS_INIT", others++ ? "|" : "");
	if (machdep->flags & MACHDEP_BT_TEXT)
		fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
	if (machdep->flags & VM_ORIG)
		fprintf(fp, "%sVM_ORIG", others++ ? "|" : "");
	if (machdep->flags & VM_2_6_11)
		fprintf(fp, "%sVM_2_6_11", others++ ? "|" : "");
	if (machdep->flags & VM_XEN)
		fprintf(fp, "%sVM_XEN", others++ ? "|" : "");
	if (machdep->flags & VM_XEN_RHEL4)
		fprintf(fp, "%sVM_XEN_RHEL4", others++ ? "|" : "");
	if (machdep->flags & VMEMMAP)
		fprintf(fp, "%sVMEMMAP", others++ ? "|" : "");
	if (machdep->flags & NO_TSS)
		fprintf(fp, "%sNO_TSS", others++ ? "|" : "");
	if (machdep->flags & SCHED_TEXT)
		fprintf(fp, "%sSCHED_TEXT", others++ ? "|" : "");
	if (machdep->flags & PHYS_BASE)
		fprintf(fp, "%sPHYS_BASE", others++ ? "|" : "");
	if (machdep->flags & FRAMESIZE_DEBUG)
		fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : "");
	if (machdep->flags & FRAMEPOINTER)
		fprintf(fp, "%sFRAMEPOINTER", others++ ? "|" : "");
	if (machdep->flags & GART_REGION)
		fprintf(fp, "%sGART_REGION", others++ ? "|" : "");
	if (machdep->flags & NESTED_NMI)
		fprintf(fp, "%sNESTED_NMI", others++ ? "|" : "");
	fprintf(fp, ")\n");
	fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
	/*
	 * NOTE(review): this line prints machdep->kvbase rather than
	 * machdep->identity_map_base; both are set to PAGE_OFFSET in
	 * x86_64_init() above, so the output is the same value, but the
	 * field reference looks like a copy/paste slip -- confirm.
	 */
	fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase);
	fprintf(fp, " pagesize: %d\n", machdep->pagesize);
	fprintf(fp, " pageshift: %d\n", machdep->pageshift);
	fprintf(fp, " pagemask: %llx\n", machdep->pagemask);
	fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset);
	fprintf(fp, " stacksize: %ld\n", machdep->stacksize);
	fprintf(fp, " hz: %d\n", machdep->hz);
	fprintf(fp, " mhz: %ld\n", machdep->mhz);
	fprintf(fp, " memsize: %llu (0x%llx)\n",
		(ulonglong)machdep->memsize, (ulonglong)machdep->memsize);
	fprintf(fp, " bits: %d\n", machdep->bits);
	fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs);
	fprintf(fp, " eframe_search: x86_64_eframe_search()\n");
	if (machdep->back_trace == x86_64_back_trace_cmd)
		fprintf(fp, " back_trace: x86_64_back_trace_cmd()\n");
	else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd)
		fprintf(fp, " back_trace: x86_64_low_budget_back_trace_cmd() %s\n",
			kt->flags & DWARF_UNWIND ?
			"-> x86_64_dwarf_back_trace_cmd()" : "");
	else if (machdep->back_trace == x86_64_dwarf_back_trace_cmd)
		fprintf(fp, " back_trace: x86_64_dwarf_back_trace_cmd() %s\n",
			kt->flags & DWARF_UNWIND ?
			"" : "->x86_64_low_budget_back_trace_cmd()");
	else
		fprintf(fp, " back_trace: %lx\n",
			(ulong)machdep->back_trace);
	fprintf(fp, " processor_speed: x86_64_processor_speed()\n");
	if (machdep->uvtop == x86_64_uvtop)
		fprintf(fp, " uvtop: x86_64_uvtop()\n");
	else if (machdep->uvtop == x86_64_uvtop_level4)
		fprintf(fp, " uvtop: x86_64_uvtop_level4()\n");
	else if (machdep->uvtop == x86_64_uvtop_level4_xen_wpt)
		fprintf(fp, " uvtop: x86_64_uvtop_level4_xen_wpt()\n");
	else if (machdep->uvtop == x86_64_uvtop_level4_rhel4_xen_wpt)
		fprintf(fp, " uvtop: x86_64_uvtop_level4_rhel4_xen_wpt()\n");
	else
		fprintf(fp, " uvtop: %lx\n", (ulong)machdep->uvtop);
	fprintf(fp, " kvtop: x86_64_kvtop()");
	if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
		fprintf(fp, " -> x86_64_kvtop_xen_wpt()");
	fprintf(fp, "\n");
	fprintf(fp, " get_task_pgd: x86_64_get_task_pgd()\n");
	fprintf(fp, " dump_irq: x86_64_dump_irq()\n");
	fprintf(fp, " get_irq_affinity: x86_64_get_irq_affinity()\n");
	fprintf(fp, " show_interrupts: x86_64_show_interrupts()\n");
	fprintf(fp, " get_stack_frame: x86_64_get_stack_frame()\n");
	fprintf(fp, " get_stackbase: generic_get_stackbase()\n");
	fprintf(fp, " get_stacktop: generic_get_stacktop()\n");
	fprintf(fp, " translate_pte: x86_64_translate_pte()\n");
	fprintf(fp, " memory_size: generic_memory_size()\n");
	fprintf(fp, " vmalloc_start: x86_64_vmalloc_start()\n");
	fprintf(fp, " is_task_addr: x86_64_is_task_addr()\n");
	fprintf(fp, " verify_symbol: x86_64_verify_symbol()\n");
	fprintf(fp, " dis_filter: x86_64_dis_filter()\n");
	fprintf(fp, " cmd_mach: x86_64_cmd_mach()\n");
	fprintf(fp, " get_smp_cpus: x86_64_get_smp_cpus()\n");
	fprintf(fp, " is_kvaddr: x86_64_is_kvaddr()\n");
	fprintf(fp, " is_uvaddr: x86_64_is_uvaddr()\n");
	fprintf(fp, " verify_paddr: x86_64_verify_paddr()\n");
	fprintf(fp, " get_kvaddr_ranges: x86_64_get_kvaddr_ranges()\n");
	fprintf(fp, " init_kernel_pgd: x86_64_init_kernel_pgd()\n");
	fprintf(fp, "clear_machdep_cache: x86_64_clear_machdep_cache()\n");
	fprintf(fp, " xendump_p2m_create: %s\n", PVOPS_XEN() ?
		"x86_64_pvops_xendump_p2m_create()" :
		"x86_64_xendump_p2m_create()");
	fprintf(fp, " get_xendump_regs: x86_64_get_xendump_regs()\n");
	fprintf(fp, " xendump_panic_task: x86_64_xendump_panic_task()\n");
	fprintf(fp, "xen_kdump_p2m_create: x86_64_xen_kdump_p2m_create()\n");
	fprintf(fp, " line_number_hooks: %s\n", machdep->line_number_hooks ?
		"x86_64_line_number_hooks" : "(unused)");
	fprintf(fp, " verify_line_number: x86_64_verify_line_number()\n");
	fprintf(fp, " value_to_symbol: x86_64_value_to_symbol()\n");
	fprintf(fp, " in_alternate_stack: x86_64_in_alternate_stack()\n");
	fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read);
	fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read);
	fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read);
	fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd);
	fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd);
	fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl);
	fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
	fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits);
	fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits);
	fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root);
	for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
		fprintf(fp, " cmdline_args[%d]: %s\n",
			i, machdep->cmdline_args[i] ?
			machdep->cmdline_args[i] : "(unused)");
	}
	fprintf(fp, " machspec: %016lx\n", (ulong)machdep->machspec);
	fprintf(fp, " userspace_top: %016lx\n", (ulong)ms->userspace_top);
	fprintf(fp, " page_offset: %016lx\n", (ulong)ms->page_offset);
	fprintf(fp, " vmalloc_start_addr: %016lx\n", (ulong)ms->vmalloc_start_addr);
	fprintf(fp, " vmalloc_end: %016lx\n", (ulong)ms->vmalloc_end);
	fprintf(fp, " modules_vaddr: %016lx\n", (ulong)ms->modules_vaddr);
	fprintf(fp, " modules_end: %016lx\n", (ulong)ms->modules_end);
	fprintf(fp, " vmemmap_vaddr: %016lx %s\n", (ulong)ms->vmemmap_vaddr,
		machdep->flags & VMEMMAP ? "" : "(unused)");
	fprintf(fp, " vmemmap_end: %016lx %s\n", (ulong)ms->vmemmap_end,
		machdep->flags & VMEMMAP ? "" : "(unused)");
	fprintf(fp, " phys_base: %lx\n", (ulong)ms->phys_base);
	fprintf(fp, " GART_start: %lx\n", ms->GART_start);
	fprintf(fp, " GART_end: %lx\n", ms->GART_end);
	fprintf(fp, " pml4: %lx\n", (ulong)ms->pml4);
	fprintf(fp, " last_pml4_read: %lx\n", (ulong)ms->last_pml4_read);
	if (ms->upml) {
		fprintf(fp, " upml: %lx\n", (ulong)ms->upml);
		fprintf(fp, " last_upml_read: %lx\n", (ulong)ms->last_upml_read);
	} else {
		/* upml is freed/NULL for the pre-2.6.11 (VM_ORIG) layout. */
		fprintf(fp, " upml: (unused)\n");
		fprintf(fp, " last_upml_read: (unused)\n");
	}
	fprintf(fp, " irqstack: %lx\n", (ulong)ms->irqstack);
	fprintf(fp, " irq_eframe_link: %ld\n", ms->irq_eframe_link);
	fprintf(fp, " pto: %s", machdep->flags & PT_REGS_INIT ?
		"\n" : "(uninitialized)\n");
	if (machdep->flags & PT_REGS_INIT) {
		fprintf(fp, " r15: %ld\n", ms->pto.r15);
		fprintf(fp, " r14: %ld\n", ms->pto.r14);
		fprintf(fp, " r13: %ld\n", ms->pto.r13);
		fprintf(fp, " r12: %ld\n", ms->pto.r12);
		fprintf(fp, " rbp: %ld\n", ms->pto.rbp);
		fprintf(fp, " rbx: %ld\n", ms->pto.rbx);
		fprintf(fp, " r11: %ld\n", ms->pto.r11);
		fprintf(fp, " r10: %ld\n", ms->pto.r10);
		fprintf(fp, " r9: %ld\n", ms->pto.r9);
		fprintf(fp, " r8: %ld\n", ms->pto.r8);
		fprintf(fp, " rax: %ld\n", ms->pto.rax);
		fprintf(fp, " rcx: %ld\n", ms->pto.rcx);
		fprintf(fp, " rdx: %ld\n", ms->pto.rdx);
		fprintf(fp, " rsi: %ld\n", ms->pto.rsi);
		fprintf(fp, " rdi: %ld\n", ms->pto.rdi);
		fprintf(fp, " orig_rax: %ld\n", ms->pto.orig_rax);
		fprintf(fp, " rip: %ld\n", ms->pto.rip);
		fprintf(fp, " cs: %ld\n", ms->pto.cs);
		fprintf(fp, " eflags: %ld\n", ms->pto.eflags);
		fprintf(fp, " rsp: %ld\n", ms->pto.rsp);
		fprintf(fp, " ss: %ld\n", ms->pto.ss);
	}
/* Label-alignment padding based on the number of digits in the cpu count. */
#define CPU_SPACES(C) \
	((C) < 10 ? 3 : (C) < 100 ? 2 : (C) < 1000 ? 1 : 0)
	fprintf(fp, "%s current[%d]:%s", space(CPU_SPACES(kt->cpus)),
		kt->cpus, ms->current ? "\n " : " (unused)\n");
	for (c = 0; ms->current && (c < kt->cpus); c++) {
		if (c && !(c%4))
			fprintf(fp, "\n ");
		fprintf(fp, "%016lx ", ms->current[c]);
	}
	if (ms->current)
		fprintf(fp, "\n");
	fprintf(fp, "%s crash_nmi_rsp[%d]:%s", space(CPU_SPACES(kt->cpus)),
		kt->cpus, ms->crash_nmi_rsp ? "\n " : " (unused)\n");
	for (c = 0; ms->crash_nmi_rsp && (c < kt->cpus); c++) {
		if (c && !(c%4))
			fprintf(fp, "\n ");
		fprintf(fp, "%016lx ", ms->crash_nmi_rsp[c]);
	}
	if (ms->crash_nmi_rsp)
		fprintf(fp, "\n");
	fprintf(fp, " vsyscall_page: %lx\n", ms->vsyscall_page);
	fprintf(fp, " thread_return: %lx\n", ms->thread_return);
	fprintf(fp, " page_protnone: %lx\n", ms->page_protnone);
	fprintf(fp, " stkinfo: isize: %d\n", ms->stkinfo.isize);
	fprintf(fp, " esize[%d]: %d,%d,%d,%d,%d,%d,%d%s\n",
		MAX_EXCEPTION_STACKS,
		ms->stkinfo.esize[0],
		ms->stkinfo.esize[1],
		ms->stkinfo.esize[2],
		ms->stkinfo.esize[3],
		ms->stkinfo.esize[4],
		ms->stkinfo.esize[5],
		ms->stkinfo.esize[6],
		machdep->flags & NO_TSS ? " (NO TSS) " : " ");
	fprintf(fp, " NMI_stack_index: %d\n", ms->stkinfo.NMI_stack_index);
	fprintf(fp, " exception_stacks:\n");
	for (i = 0; i < MAX_EXCEPTION_STACKS; i++)
		fprintf(fp, " [%d]: %s\n", i, ms->stkinfo.exception_stacks[i]);
	fprintf(fp, " ebase[%s][%d]:", arg ? "NR_CPUS" : "cpus",
		MAX_EXCEPTION_STACKS);
	cpus = arg ? NR_CPUS : kt->cpus;
	for (c = 0; c < cpus; c++) {
		fprintf(fp, "\n %s[%d]: ", c < 10 ? " " : "", c);
		for (i = 0; i < MAX_EXCEPTION_STACKS; i++) {
			fprintf(fp, "%016lx ", ms->stkinfo.ebase[c][i]);
			if (i == 3)
				fprintf(fp, "\n ");
		}
	}
	fprintf(fp, "\n ibase[%s]:\n ", arg ? "NR_CPUS" : "cpus");
	for (c = 0; c < cpus; c++) {
		if (c && !(c%4))
			fprintf(fp, "\n ");
		fprintf(fp, "%016lx ", ms->stkinfo.ibase[c]);
	}
	fprintf(fp, "\n");
}

/*
 * Gather the cpu_pda array info, updating any smp-related items that
 * were possibly bypassed or improperly initialized in kernel_init().
 */
static void
x86_64_cpu_pda_init(void)
{
	int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda;
	char *cpu_pda_buf;
	ulong level4_pgt, data_offset, cpu_pda_addr;
	struct syment *sp, *nsp;
	ulong offset, istacksize;

	_boot_cpu_pda = FALSE;
	level4_pgt = 0;

	STRUCT_SIZE_INIT(x8664_pda, "x8664_pda");
	MEMBER_OFFSET_INIT(x8664_pda_pcurrent, "x8664_pda", "pcurrent");
	MEMBER_OFFSET_INIT(x8664_pda_data_offset, "x8664_pda", "data_offset");
	MEMBER_OFFSET_INIT(x8664_pda_kernelstack, "x8664_pda", "kernelstack");
	MEMBER_OFFSET_INIT(x8664_pda_irqrsp, "x8664_pda", "irqrsp");
	MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr");
	MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt");
	MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber");
	MEMBER_OFFSET_INIT(x8664_pda_me, "x8664_pda", "me");

	cpu_pda_buf = GETBUF(SIZE(x8664_pda));

	/*
	 * Determine which pda flavor this kernel uses: the "_cpu_pda"
	 * pointer array (with or without a separate "_boot_cpu_pda"), or
	 * the older "cpu_pda" array of structures.
	 */
	if (LKCD_KERNTYPES()) {
		if (symbol_exists("_cpu_pda"))
			_cpu_pda = TRUE;
		else
			_cpu_pda = FALSE;
		nr_pda = get_cpus_possible();
	} else {
		if (symbol_exists("_cpu_pda")) {
			if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0)))
				nr_pda = NR_CPUS;
			_cpu_pda = TRUE;
		} else {
			if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
				nr_pda = NR_CPUS;
			_cpu_pda = FALSE;
		}
	}
	if (_cpu_pda) {
		if (symbol_exists("_boot_cpu_pda"))
			_boot_cpu_pda = TRUE;
		else
			_boot_cpu_pda = FALSE;
	}
	if (DUMPFILE() &&
	    !(machdep->machspec->current = calloc(nr_pda, sizeof(ulong))))
		error(FATAL, "cannot calloc %d x86_64 current pointers!\n",
			nr_pda);
	/*
	 * Walk the pda entries until one fails to read, fails sanity
	 * checks, or the cpunumber no longer matches the running count;
	 * "cpus" ends up as the number of valid entries found.
	 */
	for (i = cpus = 0; i < nr_pda; i++) {
		if (_cpu_pda) {
			if (_boot_cpu_pda) {
				if (!_CPU_PDA_READ2(i, cpu_pda_buf))
					break;
			} else {
				if (!_CPU_PDA_READ(i, cpu_pda_buf))
					break;
			}
		} else {
			if (!CPU_PDA_READ(i, cpu_pda_buf))
				break;
		}
		if (VALID_MEMBER(x8664_pda_level4_pgt)) {
			level4_pgt = ULONG(cpu_pda_buf +
				OFFSET(x8664_pda_level4_pgt));
			if (!VALID_LEVEL4_PGT_ADDR(level4_pgt))
				break;
		}
		cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber));
		if (cpunumber != cpus)
			break;
		cpus++;
		if (VALID_MEMBER(x8664_pda_data_offset)) {
			data_offset = ULONG(cpu_pda_buf +
				OFFSET(x8664_pda_data_offset));
			kt->__per_cpu_offset[i] = data_offset;
			kt->flags |= PER_CPU_OFF;
		} else
			data_offset = 0;
		machdep->machspec->stkinfo.ibase[i] = ULONG(cpu_pda_buf +
			OFFSET(x8664_pda_irqstackptr));
		if (DUMPFILE())
			machdep->machspec->current[i] = ULONG(cpu_pda_buf +
				OFFSET(x8664_pda_pcurrent));
		if (CRASHDEBUG(2))
			fprintf(fp, "CPU%d: level4_pgt: %lx "
				"data_offset: %lx pcurrent: %lx\n",
				i, level4_pgt, data_offset,
				DUMPFILE() ? machdep->machspec->current[i] : 0);
	}

	/*
	 * Derive the IRQ stack size: from the boot_cpu_stack array length
	 * when available, else from the distance to the next symbol,
	 * falling back to the historical 16K.
	 */
	if (!LKCD_KERNTYPES() &&
	    (i = get_array_length("boot_cpu_stack", NULL, 0))) {
		istacksize = i;
	} else if ((sp = symbol_search("boot_cpu_stack")) &&
	    (nsp = next_symbol(NULL, sp))) {
		istacksize = (nsp->value - sp->value) & ~(PAGESIZE()-1);
		if (istacksize != 16384)
			error(WARNING,
			    "calculated irqstack size of %ld != 16K?\n\n",
				istacksize);
	} else
		istacksize = 16384;

	machdep->machspec->stkinfo.isize = istacksize;

	/*
	 * Adjust the kernel top-of-stack values down to their base.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		if (machdep->machspec->stkinfo.ibase[i])
			machdep->machspec->stkinfo.ibase[i] -= (istacksize-64);
		else
			break;
	}

	/*
	 * Sanity check cpu 0's IRQ stack, which should be located at
	 * the address of &boot_cpu_stack[0].
	 */
	sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset);
	nsp = symbol_search("boot_cpu_stack");
	if (!sp || offset || !nsp || (sp->value != nsp->value)) {
		if (symbol_exists("boot_cpu_stack")) {
			error(WARNING,
			    "cpu 0 IRQ stack: %lx\n boot_cpu_stack: %lx\n\n",
				machdep->machspec->stkinfo.ibase[0],
				symbol_value("boot_cpu_stack"));
			if (!machdep->machspec->stkinfo.ibase[0])
				machdep->machspec->stkinfo.ibase[0] =
					symbol_value("boot_cpu_stack");
		} else
			error(WARNING,
		"boot_cpu_stack: symbol does not exist in this kernel!\n");
	}

	kt->cpus = cpus;
	if (kt->cpus > 1)
		kt->flags |= SMP;

	verify_spinlock();

	FREEBUF(cpu_pda_buf);
}

/*
 * Per-cpu data initialization for kernels without the x8664_pda struct:
 * locate the per-cpu irq_stack_union/cpu_number/current_task symbols,
 * count the cpus via the per-cpu offsets, and record each cpu's IRQ
 * stack base and (for dumpfiles) its current task pointer.
 */
static void
x86_64_per_cpu_init(void)
{
	int i, cpus, cpunumber;
	struct machine_specific *ms;
	struct syment *irq_sp, *curr_sp, *cpu_sp;

	ms = machdep->machspec;

	irq_sp = per_cpu_symbol_search("per_cpu__irq_stack_union");
	cpu_sp = per_cpu_symbol_search("per_cpu__cpu_number");
	curr_sp = per_cpu_symbol_search("per_cpu__current_task");

	if (!(kt->flags & PER_CPU_OFF)) {
		/*
		 * Presume kernel is !CONFIG_SMP.
		 */
		if (irq_sp || (irq_sp = symbol_search("irq_stack_union"))) {
			ms->stkinfo.ibase[0] = irq_sp->value;
			if ((ms->stkinfo.isize =
			    MEMBER_SIZE("irq_stack_union", "irq_stack")) <= 0)
				ms->stkinfo.isize = 16384;
		}
		if (DUMPFILE() && curr_sp) {
			if (!(ms->current = calloc(kt->cpus, sizeof(ulong))))
				error(FATAL, "cannot calloc"
				    " %d x86_64 current pointers!\n",
					kt->cpus);
			get_symbol_data(curr_sp->name, sizeof(ulong),
				&ms->current[0]);
		}
		return;
	}

	if (!cpu_sp || !irq_sp)
		return;

	/* Count cpus by verifying each per-cpu cpu_number matches its index. */
	for (i = cpus = 0; i < NR_CPUS; i++) {
		if (!readmem(cpu_sp->value + kt->__per_cpu_offset[i],
		    KVADDR, &cpunumber, sizeof(int),
		    "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR))
			break;
		if (cpunumber != cpus)
			break;
		cpus++;
		ms->stkinfo.ibase[i] = irq_sp->value + kt->__per_cpu_offset[i];
	}

	if ((ms->stkinfo.isize =
	    MEMBER_SIZE("irq_stack_union", "irq_stack")) <= 0)
		ms->stkinfo.isize = 16384;

	if (CRASHDEBUG(2))
		fprintf(fp, "x86_64_per_cpu_init: "
			"setup_percpu areas: %d\n", cpus);

	if (cpus > 1)
		kt->flags |= SMP;
	/* Possible-but-offline cpus can make the present count larger. */
	if ((i = get_cpus_present()) && (!cpus || (i < cpus)))
		kt->cpus = get_highest_cpu_present() + 1;
	else
		kt->cpus = cpus;

	if (DUMPFILE() && curr_sp) {
		if ((ms->current = calloc(kt->cpus, sizeof(ulong))) == NULL)
			error(FATAL,
			    "cannot calloc %d x86_64 current pointers!\n",
				kt->cpus);
		for (i = 0; i < kt->cpus; i++)
			if (!readmem(curr_sp->value + kt->__per_cpu_offset[i],
			    KVADDR, &ms->current[i], sizeof(ulong),
			    "current_task (per_cpu)", RETURN_ON_ERROR))
				continue;
	}

	verify_spinlock();
}

/*
 * Gather the ist addresses for each CPU.
 */
static void
x86_64_ist_init(void)
{
	int c, i, cnt, cpus, esize;
	ulong vaddr, offset;
	ulong init_tss;
	struct machine_specific *ms;
	struct syment *boot_sp, *tss_sp, *ist_sp;

	ms = machdep->machspec;
	tss_sp = per_cpu_symbol_search("per_cpu__init_tss");
	ist_sp = per_cpu_symbol_search("per_cpu__orig_ist");

	x86_64_exception_stacks_init();

	if (!tss_sp && symbol_exists("init_tss")) {
		/* Older kernels: init_tss is a static array of tss_structs. */
		init_tss = symbol_value("init_tss");
		for (c = cpus = 0; c < NR_CPUS; c++) {
			vaddr = init_tss + (c * SIZE(tss_struct)) +
				OFFSET(tss_struct_ist);
			readmem(vaddr, KVADDR, &ms->stkinfo.ebase[c][0],
				sizeof(ulong) * MAX_EXCEPTION_STACKS,
				"tss_struct ist array", FAULT_ON_ERROR);
			if (ms->stkinfo.ebase[c][0] == 0)
				break;
		}
	} else if (tss_sp) {
		/* Newer kernels: per-cpu init_tss. */
		for (c = 0; c < kt->cpus; c++) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
				if (kt->__per_cpu_offset[c] == 0)
					break;
				vaddr = tss_sp->value + kt->__per_cpu_offset[c];
			} else
				vaddr = tss_sp->value;

			vaddr += OFFSET(tss_struct_ist);

			readmem(vaddr, KVADDR, &ms->stkinfo.ebase[c][0],
				sizeof(ulong) * MAX_EXCEPTION_STACKS,
				"tss_struct ist array", FAULT_ON_ERROR);

			if (ms->stkinfo.ebase[c][0] == 0)
				break;
		}
		if (ist_sp) {
			/*
			 * Cross-check the tss ist values against the per-cpu
			 * orig_ist array, which takes precedence.
			 */
			for (c = 0; c < kt->cpus; c++) {
				ulong estacks[MAX_EXCEPTION_STACKS];
				if ((kt->flags & SMP) &&
				    (kt->flags & PER_CPU_OFF)) {
					if (kt->__per_cpu_offset[c] == 0)
						break;
					vaddr = ist_sp->value +
						kt->__per_cpu_offset[c];
				} else
					vaddr = ist_sp->value;

				readmem(vaddr, KVADDR, &estacks[0],
					sizeof(ulong) * MAX_EXCEPTION_STACKS,
					"orig_ist array", FAULT_ON_ERROR);

				for (i = 0; i < MAX_EXCEPTION_STACKS; i++) {
					if (ms->stkinfo.ebase[c][i] != estacks[i])
						error(WARNING,
						    "cpu %d %s stack: init_tss: %lx orig_ist: %lx\n",
							c,
							ms->stkinfo.exception_stacks[i],
							ms->stkinfo.ebase[c][i],
							estacks[i]);
					ms->stkinfo.ebase[c][i] = estacks[i];
				}
			}
		}
	} else if (!symbol_exists("boot_exception_stacks")) {
		machdep->flags |= NO_TSS;

		if (CRASHDEBUG(1))
			error(NOTE, "CONFIG_X86_NO_TSS\n");

		return;
	}

	/* Infer a single stack's size from the gap between the first two. */
	if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1])
		esize = ms->stkinfo.ebase[0][1] - ms->stkinfo.ebase[0][0];
	else
		esize = 4096;

	/*
	 * Knowing the size, now adjust the top-of-stack addresses back down
	 * to the base stack address.
	 */
	for (c = 0; c < kt->cpus; c++) {
		for (i = cnt = 0; i < MAX_EXCEPTION_STACKS; i++) {
			if (ms->stkinfo.ebase[c][i] == 0)
				break;
			cnt++;
			/* >= 2.6.18 doubles the DEBUG stack size. */
			if ((THIS_KERNEL_VERSION >= LINUX(2,6,18)) &&
			    STREQ(ms->stkinfo.exception_stacks[i], "DEBUG"))
				ms->stkinfo.esize[i] = esize*2;
			else
				ms->stkinfo.esize[i] = esize;
			ms->stkinfo.ebase[c][i] -= ms->stkinfo.esize[i];
		}
	}

	/*
	 * Sanity check cpu 0's first exception stack, which should be
	 * located at: &boot_exception_stacks[0]
	 */
	boot_sp = value_search(ms->stkinfo.ebase[0][0], &offset);
	if (!boot_sp || offset ||
	    !STREQ(boot_sp->name, "boot_exception_stacks")) {
		if ((boot_sp = symbol_search("boot_exception_stacks"))) {
			error(WARNING,
			    "cpu 0 first exception stack: %lx\n boot_exception_stacks: %lx\n\n",
				ms->stkinfo.ebase[0][0], boot_sp->value);
			if (!ms->stkinfo.ebase[0][0])
				ms->stkinfo.ebase[0][0] = boot_sp->value;
		} else if (STRUCT_EXISTS("x8664_pda"))
			error(WARNING,
		"boot_exception_stacks: symbol does not exist in this kernel!\n");
	}
}

/*
 * Post-initialization pass: scan each cpu's NMI exception stack in a
 * dumpfile for evidence that the cpu was stopped by an NMI (text return
 * addresses into the crash/NMI handlers), and detect kernels whose
 * schedule() coincides with __sched_text_start.
 */
static void
x86_64_post_init(void)
{
	int c, i, clues;
	struct machine_specific *ms;
	ulong *up;
	struct syment *spt, *spc;
	ulong offset;

	/*
	 * Check whether each cpu was stopped by an NMI.
	 */
	ms = machdep->machspec;
	if (DUMPFILE() && (ms->crash_nmi_rsp =
	    calloc(kt->cpus, sizeof(ulong))) == NULL)
		error(FATAL, "cannot calloc %d x86_64 NMI rsp values\n",
			kt->cpus);

	for (c = 0; DUMPFILE() && (c < kt->cpus); c++) {
		if (ms->stkinfo.ebase[c][NMI_STACK] == 0)
			break;

		if (!readmem(ms->stkinfo.ebase[c][NMI_STACK],
		    KVADDR, ms->irqstack,
		    ms->stkinfo.esize[NMI_STACK],
		    "NMI exception stack contents",
		    RETURN_ON_ERROR|QUIET))
			continue;

		/* Scan the stack words for kernel-text "clues". */
		for (i = clues = 0;
		     i < (ms->stkinfo.esize[NMI_STACK])/sizeof(ulong); i++){
			up = (ulong *)(&ms->irqstack[i*sizeof(ulong)]);

			if (!is_kernel_text(*up) ||
			    !(spt = value_search(*up, &offset)))
				continue;

			if (STREQ(spt->name, "try_crashdump") ||
			    STREQ(spt->name, "die_nmi"))
				clues++;

			if ((STREQ(spt->name, "nmi_watchdog_tick") ||
			     STREQ(spt->name, "default_do_nmi"))) {
				/* (*up)-5: back up over the call instruction. */
				spc = x86_64_function_called_by((*up)-5);
				if (spc && STREQ(spc->name, "die_nmi"))
					clues += 2;
			}

			if (STREQ(spt->name, "crash_nmi_callback")) {
				/* rsp is two words below the stack top. */
				up = (ulong *)(&ms->irqstack[ms->stkinfo.esize[NMI_STACK]]);
				up -= 2;
				ms->crash_nmi_rsp[c] = *up;
			}
		}

		if (clues >= 2)
			kt->cpu_flags[c] |= NMI;
	}

	if (symbol_exists("__sched_text_start") &&
	    (symbol_value("__sched_text_start") == symbol_value("schedule")))
		machdep->flags |= SCHED_TEXT;
}

/*
 *  No x86_64 swapper_pg_dir; initialize the vt->kernel_pgd[NR_CPUS] array
 *  with the lazily-sync'd init_level4_pgt page address.  The level4 page
 *  could be taken from the per-cpu cpu_pda.level4_pgt pointer, but since
 *  the kernel pgd_offset_k() is defined as shown below, we'll derive
 *  the third-level pgd in the same manner:
 *
 *    /@ This accesses the reference page table of the boot cpu.
 *       Other CPUs get synced lazily via the page fault handler.
 @/
 *
 *  static inline pgd_t *pgd_offset_k(unsigned long address)
 *  {
 *      unsigned long addr;
 *
 *      addr = pml4_val(init_level4_pgt[pml4_index(address)]);
 *      addr &= PHYSICAL_PAGE_MASK;
 *      return __pgd_offset_k((pgd_t *)__va(addr), address);
 *  }
 */
static void
x86_64_init_kernel_pgd(void)
{
	int i;
	ulong init_level4_pgt;

	/*
	 *  Every slot of vt->kernel_pgd[] points at the one shared
	 *  init_level4_pgt page, then the pml4 buffer is primed from it.
	 */
	init_level4_pgt = symbol_value("init_level4_pgt");

	for (i = 0; i < NR_CPUS; i++)
		vt->kernel_pgd[i] = init_level4_pgt;

	FILL_PML4();
}

/*
 *  x86_64 __pa() clone: kernel-text addresses at or above
 *  __START_KERNEL_map are rebased by machspec->phys_base; all other
 *  unity-mapped addresses are offset by PAGE_OFFSET.
 */
ulong
x86_64_VTOP(ulong vaddr)
{
	if (vaddr >= __START_KERNEL_map)
		return ((vaddr) - (ulong)__START_KERNEL_map + machdep->machspec->phys_base);
	else
		return ((vaddr) - PAGE_OFFSET);
}

/*
 *  Include both vmalloc'd and module address space as VMALLOC space;
 *  the vmemmap region is included only when the VMEMMAP flag is set.
 *  NOTE(review): the *_END bounds are treated as inclusive here.
 */
int
x86_64_IS_VMALLOC_ADDR(ulong vaddr)
{
	return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) ||
		((machdep->flags & VMEMMAP) &&
		 (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END)) ||
		(vaddr >= MODULES_VADDR && vaddr <= MODULES_END));
}

/*
 *  TRUE only for addresses within the module mapping region.
 */
static int
x86_64_is_module_addr(ulong vaddr)
{
	return (vaddr >= MODULES_VADDR && vaddr <= MODULES_END);
}

/*
 *  Refining this may cause more problems than just doing it this way.
 */
static int
x86_64_is_kvaddr(ulong addr)
{
	/* RHEL4 Xen kernels start kernel space at VMALLOC_START. */
	if (machdep->flags & VM_XEN_RHEL4)
		return (addr >= VMALLOC_START);
	else
		return (addr >= PAGE_OFFSET);
}

/*
 *  TRUE if the address lies below the user/kernel split; the task
 *  context argument is unused on x86_64.
 */
static int
x86_64_is_uvaddr(ulong addr, struct task_context *tc)
{
	return (addr < USERSPACE_TOP);
}

/*
 *  Translates a user virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 *
 *  This routine can also take mapped kernel virtual addresses if the -u flag
 *  was passed to cmd_vtop(), just pass it to x86_64_kvtop().
*/ static int x86_64_uvtop_level4(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pml; ulong pml_paddr; ulong pml_pte; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; physaddr_t physpage; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pml_paddr = x86_64_VTOP((ulong)pml); FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); if (verbose) fprintf(fp, " PML: %lx => %lx\n", (ulong)pml, pml_pte); if (!(pml_pte & _PAGE_PRESENT)) goto no_upage; pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK; FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pgd, address); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pmd_pte, 0, 0); } physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; 
FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop_level4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pml; ulong pml_paddr; ulong pml_pte; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong pseudo_pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pml = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pml, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pml_paddr = x86_64_VTOP((ulong)pml); FILL_UPML(pml_paddr, PHYSADDR, PAGESIZE()); pml = ((ulong *)pml_paddr) + pml4_index(uvaddr); pml_pte = ULONG(machdep->machspec->upml + PAGEOFFSET(pml)); if (verbose) fprintf(fp, " PML: %lx => %lx [machine]\n", (ulong)pml, pml_pte); if (!(pml_pte & _PAGE_PRESENT)) goto no_upage; pgd_paddr = pml_pte & PHYSICAL_PAGE_MASK; pgd_paddr = xen_m2p(pgd_paddr); if (verbose) fprintf(fp, " PML: %lx\n", pgd_paddr); FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* 
* pmd = pmd_offset(pgd, address); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; pmd_paddr = xen_m2p(pmd_paddr); if (verbose) fprintf(fp, " PUD: %lx\n", pmd_paddr); FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; pte_paddr = xen_m2p(pte_paddr); if (verbose) fprintf(fp, " PMD: %lx\n", pte_paddr); FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & 
PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong pseudo_pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pgd_paddr = x86_64_VTOP((ulong)pgd); FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PGD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pgd, address); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; pmd_paddr = xen_m2p(pmd_paddr); if (verbose) fprintf(fp, " PGD: %lx\n", pmd_paddr); FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
MKSTR(PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; pte_paddr = xen_m2p(pte_paddr); if (verbose) fprintf(fp, " PMD: %lx\n", pte_paddr); FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; physaddr_t physpage; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); /* * pgd = pgd_offset(mm, address); */ if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pgd_paddr = x86_64_VTOP((ulong)pgd); FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); pgd_pte = 
ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pgd, address); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(uvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pmd_pte, 0, 0); } physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(uvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
*/ static int x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pml4; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; physaddr_t physpage; if (!IS_KVADDR(kvaddr)) return FALSE; if (XEN_HYPER_MODE()) { if (XEN_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - XEN_VIRT_START + xen_phys_start(); return TRUE; } if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } FILL_PML4_HYPER(); pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); if (verbose) { fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); } } else { if (!vt->vmalloc_start) { *paddr = x86_64_VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = x86_64_VTOP(kvaddr); if (!verbose) return TRUE; } if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) return (x86_64_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); /* * pgd = pgd_offset_k(addr); */ FILL_PML4(); pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); if (verbose) { fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); fprintf(fp, "PAGE DIRECTORY: %lx\n", *pml4); } } if (!(*pml4) & _PAGE_PRESENT) goto no_kpage; pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_kpage; /* * pmd = pmd_offset(pgd, addr); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & _PAGE_PRESENT)) goto no_kpage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", 
PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pmd_pte, 0, 0); } physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, addr); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_kpage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_kpage: return FALSE; } static int x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pml4; ulong *pgd; ulong pgd_paddr; ulong pgd_pte; ulong *pmd; ulong pmd_paddr; ulong pmd_pte; ulong pseudo_pmd_pte; ulong *ptep; ulong pte_paddr; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; /* * pgd = pgd_offset_k(addr); */ FILL_PML4(); pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr); if (verbose) { fprintf(fp, "PML4 DIRECTORY: %lx\n", vt->kernel_pgd[0]); fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pml4); } if (!(*pml4) & _PAGE_PRESENT) goto no_kpage; pgd_paddr = (*pml4) & PHYSICAL_PAGE_MASK; pgd_paddr = xen_m2p(pgd_paddr); if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd_paddr); FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(kvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); if (!(pgd_pte & _PAGE_PRESENT)) goto no_kpage; /* * pmd = pmd_offset(pgd, addr); */ pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK; pmd_paddr = xen_m2p(pmd_paddr); if (verbose) 
fprintf(fp, " PUD: %lx\n", pmd_paddr); FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(kvaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); if (verbose) fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); if (!(pmd_pte & _PAGE_PRESENT)) goto no_kpage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, addr); * pte = *ptep; */ pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; pte_paddr = xen_m2p(pte_paddr); if (verbose) fprintf(fp, " PMD: %lx\n", pte_paddr); FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(kvaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); if (verbose) fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); if (!(pte & (_PAGE_PRESENT))) { if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_kpage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_kpage: return FALSE; } /* * Determine where vmalloc'd memory 
 *  starts.
 */
static ulong
x86_64_vmalloc_start(void)
{
	return ((ulong)VMALLOC_START);
}

/*
 *  thread_info implementation makes for less accurate results here.
 */
static int
x86_64_is_task_addr(ulong task)
{
	/*
	 *  With thread_info, the task struct is slab-allocated, so any
	 *  kernel virtual address is plausible; otherwise require the
	 *  address to sit on a stack-alignment boundary as well.
	 */
	if (tt->flags & THREAD_INFO)
		return IS_KVADDR(task);
	else
		return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
}

/*
 *  easy enough...
 */
static ulong
x86_64_processor_speed(void)
{
	unsigned long cpu_khz = 0;

	/* Return the cached value, if previously computed. */
	if (machdep->mhz)
		return (machdep->mhz);

	/*
	 *  NOTE(review): only sizeof(int) bytes are read into an
	 *  unsigned long; correctness relies on the zero initializer
	 *  and little-endian layout -- confirm cpu_khz is 4 bytes.
	 */
	if (symbol_exists("cpu_khz")) {
		get_symbol_data("cpu_khz", sizeof(int), &cpu_khz);
		if (cpu_khz)
			return(machdep->mhz = cpu_khz/1000);
	}

	return 0;
}

/*
 *  Accept or reject a symbol from the kernel namelist.
 */
static int
x86_64_verify_symbol(const char *name, ulong value, char type)
{
	if (!name || !strlen(name))
		return FALSE;

	if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift"))
		return TRUE;

	/*
	 *  Until the first base kernel text symbol has been seen, accept
	 *  only the text start markers, and the per-cpu symbols that can
	 *  legitimately precede them (gated by the PERCPU_SYMS flag).
	 */
	if (!(machdep->flags & KSYMS_START)) {
		if (STREQ(name, "_text") || STREQ(name, "_stext")) {
			machdep->flags |= KSYMS_START;
			if (!st->first_ksymbol)
				st->first_ksymbol = value;
			return TRUE;
		} else if (STREQ(name, "__per_cpu_start")) {
			st->flags |= PERCPU_SYMS;
			return TRUE;
		} else if (st->flags & PERCPU_SYMS) {
			if (STRNEQ(name, "per_cpu") || STREQ(name, "__per_cpu_end"))
				return TRUE;
			if ((type == 'V') || (type == 'd') || (type == 'D'))
				return TRUE;
		}

		return FALSE;
	}

	return TRUE;
}

/*
 *  Prevent base kernel pc section ranges that end with a
 *  vsyscall address from being accepted for kernel module
 *  addresses.
 */
static int
x86_64_verify_line_number(ulong pc, ulong low, ulong high)
{
	if (IS_MODULE_VADDR(pc) && !IS_MODULE_VADDR(low) &&
	    is_vsyscall_addr(high))
		return FALSE;

	return TRUE;
}

/*
 *  Get the relevant page directory pointer from a task structure.
 *  Not applicable on x86_64 -- always a fatal error if called.
 */
static ulong
x86_64_get_task_pgd(ulong task)
{
	return (error(FATAL, "x86_64_get_task_pgd: N/A\n"));
}

/*
 *  Translate a PTE, returning TRUE if the page is present.
 *  If a physaddr pointer is passed in, don't print anything.
*/ static int x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) { int c, others, len1, len2, len3; ulong paddr; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; int page_present; paddr = pte & PHYSICAL_PAGE_MASK; page_present = pte & (_PAGE_PRESENT | _PAGE_PROTNONE); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte & _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (pte & _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (pte & _PAGE_PWT) fprintf(fp, "%sPWT", others++ ? "|" : ""); if (pte & _PAGE_PCD) fprintf(fp, "%sPCD", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? 
"|" : ""); if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) fprintf(fp, "%sPSE", others++ ? "|" : ""); if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); if (pte & _PAGE_GLOBAL) fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); if (pte & _PAGE_NX) fprintf(fp, "%sNX", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return (page_present); } /* * Look for likely exception frames in a stack. */ static int x86_64_eframe_search(struct bt_info *bt) { int i, c, cnt, estack_index; ulong estack, irqstack, stacksize; ulong *up; struct machine_specific *ms; struct bt_info bt_local; if (bt->flags & BT_EFRAME_SEARCH2) { BCOPY(bt, &bt_local, sizeof(struct bt_info)); bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; ms = machdep->machspec; for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; if (ms->stkinfo.ibase[c] == 0) break; bt->hp->esp = ms->stkinfo.ibase[c]; fprintf(fp, "CPU %d IRQ STACK:", c); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n\n"); continue; } else fprintf(fp, "\n"); if ((cnt = x86_64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i] == 0) break; bt->hp->esp = ms->stkinfo.ebase[c][i]; fprintf(fp, "CPU %d %s EXCEPTION STACK:", c, ms->stkinfo.exception_stacks[i]); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n\n"); continue; } else fprintf(fp, "\n"); if ((cnt = x86_64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } } return 0; } if (bt->hp && bt->hp->esp) { ms = machdep->machspec; bt->stkptr = bt->hp->esp; if ((estack = x86_64_in_exception_stack(bt, &estack_index))) { stacksize = ms->stkinfo.esize[estack_index]; bt->stackbase = estack; bt->stacktop = estack + 
ms->stkinfo.esize[estack_index]; bt->stackbuf = ms->irqstack; alter_stackbuf(bt); } else if ((irqstack = x86_64_in_irqstack(bt))) { stacksize = ms->stkinfo.isize; bt->stackbase = irqstack; bt->stacktop = irqstack + ms->stkinfo.isize; bt->stackbuf = ms->irqstack; alter_stackbuf(bt); } else if (!INSTACK(bt->stkptr, bt)) error(FATAL, "unrecognized stack address for this task: %lx\n", bt->hp->esp); } stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); if (bt->stkptr) i = (bt->stkptr - bt->stackbase)/sizeof(ulong); else i = 0; for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| EFRAME_VERIFY, 0, (char *)up, bt, fp)) cnt++; } return cnt; } static void x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (rsp < bt->frameptr) return; if (!INSTACK(rsp, bt) || !INSTACK(bt->frameptr, bt)) return; words = (rsp - bt->frameptr) / sizeof(ulong) + 1; addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * Check a frame for a requested reference. 
*/ static void x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) { ulong offset; struct syment *sp = NULL; if (!name) sp = value_search(text, &offset); else if (!text) sp = symbol_search(name); switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (name) { if (STREQ(name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } else { if (sp && !offset && STREQ(sp->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } break; case BT_REF_HEXVAL: if (text) { if (bt->ref->hexval == text) bt->ref->cmdflags |= BT_REF_FOUND; } else if (sp && (bt->ref->hexval == sp->value)) bt->ref->cmdflags |= BT_REF_FOUND; else if (!name && !text && (bt->ref->hexval == 0)) bt->ref->cmdflags |= BT_REF_FOUND; break; } } /* * Determine the function containing a .text.lock. reference. */ static ulong text_lock_function(char *name, struct bt_info *bt, ulong locktext) { int c, reterror, instr, arg; char buf[BUFSIZE]; char *arglist[MAXARGS]; char *p1; ulong locking_func; instr = arg = -1; locking_func = 0; open_tmpfile2(); if (STREQ(name, ".text.lock.spinlock")) sprintf(buf, "x/4i 0x%lx", locktext); else sprintf(buf, "x/1i 0x%lx", locktext); if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { close_tmpfile2(); bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { c = parse_line(buf, arglist); if (instr == -1) { /* * Check whether are * in the output string. 
*/ if (LASTCHAR(arglist[0]) == ':') { instr = 1; arg = 2; } else { instr = 2; arg = 3; } } if (c < (arg+1)) break; if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) { p1 = arglist[arg]; reterror = 0; locking_func = htol(p1, RETURN_ON_ERROR, &reterror); if (reterror) locking_func = 0; break; } } close_tmpfile2(); if (!locking_func) bt->flags |= BT_FRAMESIZE_DISABLE; return locking_func; } /* * As of 2.6.29, the handy check for the "error_exit:" label * no longer applies; it became an entry point that was jmp'd to * after the exception handler was called. Therefore, if the * return address is an offset from any of these functions, * then the exception frame should be checked for: * * .macro errorentry sym do_sym * errorentry invalid_TSS do_invalid_TSS * errorentry segment_not_present do_segment_not_present * errorentry alignment_check do_alignment_check * errorentry xen_stack_segment do_stack_segment * errorentry general_protection do_general_protection * errorentry page_fault do_page_fault * * .macro zeroentry sym do_sym * zeroentry divide_error do_divide_error * zeroentry overflow do_overflow * zeroentry bounds do_bounds * zeroentry invalid_op do_invalid_op * zeroentry device_not_available do_device_not_available * zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun * zeroentry spurious_interrupt_bug do_spurious_interrupt_bug * zeroentry coprocessor_error do_coprocessor_error * zeroentry simd_coprocessor_error do_simd_coprocessor_error * zeroentry xen_hypervisor_callback xen_do_hypervisor_callback * zeroentry xen_debug do_debug * zeroentry xen_int3 do_int3 */ static const char *exception_functions[] = { "invalid_TSS", "segment_not_present", "alignment_check", "xen_stack_segment", "general_protection", "page_fault", "divide_error", "overflow", "bounds", "invalid_op", "device_not_available", "coprocessor_segment_overrun", "spurious_interrupt_bug", "coprocessor_error", "simd_coprocessor_error", "xen_hypervisor_callback", "xen_debug", 
/* tail of the exception_functions[] name table (array begins above this chunk) */
"xen_int3",
"async_page_fault",
NULL,
};

/*
 * print one entry of a stack trace
 *
 * Return codes from x86_64_print_stack_entry(), consumed by the
 * backtrace loops below to decide whether to keep walking the stack:
 */
#define BACKTRACE_COMPLETE (1)                   /* hit a known stack-bottom function; stop */
#define BACKTRACE_ENTRY_IGNORED (2)              /* value was not a plausible return address */
#define BACKTRACE_ENTRY_DISPLAYED (3)            /* printed a normal frame */
#define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) /* frame plus an embedded exception frame */

/*
 * Print one backtrace entry for the text address "text" found at stack
 * index "stkindex" (or at bt->stkptr for exception-frame/start entries).
 * Side effects: may set bt->call_target, bt->frameptr, and the
 * BT_CHECK_CALLER/BT_NO_CHECK_CALLER flags used by the caller's
 * process-stack walk.  Returns one of the BACKTRACE_* codes above.
 */
static int
x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level,
	int stkindex, ulong text)
{
	ulong rsp, offset, locking_func;
	struct syment *sp, *spl;
	char *name, *name_plus_offset;
	int i, result;
	long eframe_check;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	struct load_module *lm;

	eframe_check = -1;
	if (!(bt->flags & BT_SAVE_EFRAME_IP))
		bt->eframe_ip = 0;
	offset = 0;
	sp = value_search(text, &offset);
	if (!sp)
		return BACKTRACE_ENTRY_IGNORED;

	name = sp->name;

	/* optional "symbol+offset" form when the user asked for it (bt -s) */
	if (offset && (bt->flags & BT_SYMBOL_OFFSET))
		name_plus_offset = value_to_symstr(text, buf2, bt->radix);
	else
		name_plus_offset = NULL;

	/* "bt -t/-T" mode: dump the address/symbol pair and return early */
	if (bt->flags & BT_TEXT_SYMBOLS) {
		if (bt->flags & BT_EXCEPTION_FRAME)
			rsp = bt->stkptr;
		else
			rsp = bt->stackbase + (stkindex * sizeof(long));
		fprintf(ofp, " [%s] %s at %lx",
			mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)),
			name_plus_offset ? name_plus_offset : name, text);
		if (module_symbol(text, NULL, &lm, NULL, 0))
			fprintf(ofp, " [%s]", lm->mod_name);
		fprintf(ofp, "\n");
		if (BT_REFERENCE_CHECK(bt))
			x86_64_do_bt_reference_check(bt, text, name);
		return BACKTRACE_ENTRY_DISPLAYED;
	}

	/*
	 * A zero offset means the stack value is the address of a function's
	 * first instruction -- normally stale data, not a return address,
	 * except for a few special-cased symbols.
	 */
	if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) &&
	    !(bt->flags & BT_START)) {
		if (STREQ(name, "child_rip")) {
			if (symbol_exists("kernel_thread"))
				name = "kernel_thread";
			else if (symbol_exists("arch_kernel_thread"))
				name = "arch_kernel_thread";
		} else if (!(bt->flags & BT_SCHEDULE)) {
			if (STREQ(name, "error_exit"))
				eframe_check = 8;
			else {
				if (CRASHDEBUG(2))
					fprintf(ofp,
					    "< ignoring text symbol with no offset: %s() >\n",
						sp->name);
				return BACKTRACE_ENTRY_IGNORED;
			}
		}
	}

	/*
	 * On >= 2.6.29 kernels, a return address inside one of the known
	 * exception entry functions implies a pt_regs frame sits just above
	 * it on the stack (8 bytes up); arrange for it to be verified below.
	 */
	if ((THIS_KERNEL_VERSION >= LINUX(2,6,29)) &&
	    (eframe_check == -1) && offset &&
	    !(bt->flags & (BT_EXCEPTION_FRAME|BT_START|BT_SCHEDULE))) {
		for (i = 0; exception_functions[i]; i++) {
			if (STREQ(name, exception_functions[i])) {
				eframe_check = 8;
				break;
			}
		}
		if (x86_64_in_irqstack(bt) && strstr(name, "_interrupt"))
			eframe_check = 0;
	}

	if (bt->flags & BT_SCHEDULE)
		name = "schedule";

	/* these symbols terminate the backtrace */
	if (STREQ(name, "child_rip")) {
		if (symbol_exists("kernel_thread"))
			name = "kernel_thread";
		else if (symbol_exists("arch_kernel_thread"))
			name = "arch_kernel_thread";
		result = BACKTRACE_COMPLETE;
	} else if (STREQ(name, "cpu_idle") ||
	    STREQ(name, "system_call_fastpath"))
		result = BACKTRACE_COMPLETE;
	else
		result = BACKTRACE_ENTRY_DISPLAYED;

	if (bt->flags & BT_EXCEPTION_FRAME)
		rsp = bt->stkptr;
	else if (bt->flags & BT_START)
		rsp = bt->stkptr;
	else
		rsp = bt->stackbase + (stkindex * sizeof(long));

	/* "bt -f": dump the raw stack data of the previous frame first */
	if ((bt->flags & BT_FULL)) {
		if (bt->frameptr)
			x86_64_display_full_frame(bt, rsp, ofp);
		bt->frameptr = rsp + sizeof(ulong);
	}

	fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level,
		rsp, name_plus_offset ? name_plus_offset : name, text);

	if (STREQ(name, "tracesys"))
		fprintf(ofp, " (via system_call)");
	else if (STRNEQ(name, ".text.lock.")) {
		/* out-of-line spinlock text: show the function that took the lock */
		if ((locking_func = text_lock_function(name, bt, text)) &&
		    (spl = value_search(locking_func, &offset)))
			fprintf(ofp, " (via %s)", spl->name);
	}
	if (module_symbol(text, NULL, &lm, NULL, 0))
		fprintf(ofp, " [%s]", lm->mod_name);

	if (bt->flags & BT_FRAMESIZE_DISABLE)
		fprintf(ofp, " *");

	fprintf(ofp, "\n");

	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(text, buf1, FALSE);
		if (strlen(buf1))
			fprintf(ofp, " %s\n", buf1);
	}

	/* verify/print the pt_regs frame implied by an exception function */
	if (eframe_check >= 0) {
		if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY,
		    bt->stackbase + (stkindex*sizeof(long)) + eframe_check,
		    NULL, bt, ofp))
			result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED;
	}

	if (BT_REFERENCE_CHECK(bt))
		x86_64_do_bt_reference_check(bt, text, name);

	/*
	 * Remember this function so the process-stack walk can require that
	 * the next (older) frame actually calls it (BT_CHECK_CALLER).
	 */
	bt->call_target = name;

	if (is_direct_call_target(bt)) {
		if (CRASHDEBUG(2))
			fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n",
				bt->call_target);
		bt->flags |= BT_CHECK_CALLER;
	} else {
		if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER))
			fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n",
				bt->call_target);
		if (bt->flags & BT_CHECK_CALLER) {
			if (CRASHDEBUG(2))
				fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n");
			bt->flags |= BT_NO_CHECK_CALLER;
		}
		bt->flags &= ~(ulonglong)BT_CHECK_CALLER;
	}

	return result;
}

/*
 * Unroll a kernel stack.
 * Stub: never called directly -- machdep->back_trace is wired to the
 * low-budget or dwarf variant below.
 */
static void
x86_64_back_trace_cmd(struct bt_info *bt)
{
	error(FATAL, "x86_64_back_trace_cmd: TBD\n");
}

/*
 * Determine whether the initial stack pointer is located in one of the
 * exception stacks.
 */
/*
 * Scan every cpu's per-exception-stack array for one containing bt->stkptr.
 * Returns the base address of the matching exception stack (0 if none) and,
 * when estack_index is non-NULL, stores which exception stack it was.
 * Note: a cpu/stack mismatch against bt->tc->processor is only reported,
 * not rejected.
 */
static ulong
x86_64_in_exception_stack(struct bt_info *bt, int *estack_index)
{
	int c, i;
	ulong rsp;
	ulong estack;
	struct machine_specific *ms;

	rsp = bt->stkptr;
	ms = machdep->machspec;
	estack = 0;

	for (c = 0; !estack && (c < kt->cpus); c++) {
		for (i = 0; i < MAX_EXCEPTION_STACKS; i++) {
			/* ebase[] is populated densely; 0 marks the end */
			if (ms->stkinfo.ebase[c][i] == 0)
				break;
			if ((rsp >= ms->stkinfo.ebase[c][i]) &&
			    (rsp < (ms->stkinfo.ebase[c][i] +
			    ms->stkinfo.esize[i]))) {
				estack = ms->stkinfo.ebase[c][i];
				if (estack_index)
					*estack_index = i;
				if (CRASHDEBUG(1) && (c != bt->tc->processor))
					error(INFO,
					    "task cpu: %d exception stack cpu: %d\n",
						bt->tc->processor, c);
				break;
			}
		}
	}

	return estack;
}

/*
 * Determine whether the current stack pointer is in a cpu's irqstack.
 * Returns the base of the matching IRQ stack, or 0.
 */
static ulong
x86_64_in_irqstack(struct bt_info *bt)
{
	int c;
	ulong rsp;
	ulong irqstack;
	struct machine_specific *ms;

	rsp = bt->stkptr;
	ms = machdep->machspec;
	irqstack = 0;

	for (c = 0; !irqstack && (c < kt->cpus); c++) {
		/* ibase[] is populated densely; 0 marks the end */
		if (ms->stkinfo.ibase[c] == 0)
			break;
		if ((rsp >= ms->stkinfo.ibase[c]) &&
		    (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) {
			irqstack = ms->stkinfo.ibase[c];
			if (CRASHDEBUG(1) && (c != bt->tc->processor))
				error(INFO,
				    "task cpu: %d IRQ stack cpu: %d\n",
					bt->tc->processor, c);
			break;
		}
	}

	return irqstack;
}

/*
 * TRUE if rsp lies in the given cpu's IRQ stack or any of its exception
 * stacks.  NOTE(review): only the upper bound of "cpu" is range-checked;
 * callers are presumed never to pass a negative cpu -- confirm.
 */
static int
x86_64_in_alternate_stack(int cpu, ulong rsp)
{
	int i;
	struct machine_specific *ms;

	if (cpu >= NR_CPUS)
		return FALSE;

	ms = machdep->machspec;

	if (ms->stkinfo.ibase[cpu] &&
	    (rsp >= ms->stkinfo.ibase[cpu]) &&
	    (rsp < (ms->stkinfo.ibase[cpu] + ms->stkinfo.isize)))
		return TRUE;

	for (i = 0; i < MAX_EXCEPTION_STACKS; i++) {
		if (ms->stkinfo.ebase[cpu][i] &&
		    (rsp >= ms->stkinfo.ebase[cpu][i]) &&
		    (rsp < (ms->stkinfo.ebase[cpu][i] +
		    ms->stkinfo.esize[i])))
			return TRUE;
	}

	return FALSE;
}

/* fatal-error format strings for impossible stack transitions */
#define STACK_TRANSITION_ERRMSG_E_I_P \
"cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n"
#define STACK_TRANSITION_ERRMSG_E_P \
"cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n"
#define STACK_TRANSITION_ERRMSG_I_P \
"cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n"

/*
 * Low-budget back tracer -- dump text return addresses, following call chain
 * when possible, along with any verifiable exception frames.
 *
 * Overall flow: (1) handle degenerate/register-only cases, (2) unwind any
 * exception stack(s), (3) unwind the IRQ stack, (4) transition to and walk
 * the process stack, validating each candidate return address.
 */
static void
x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in)
{
	int i, level, done, framesize, estack_index;
	ulong rsp, offset, stacktop;
	ulong *up;
	long cs;
	struct syment *sp, *spt;
	FILE *ofp;
	ulong estack, irqstack;
	ulong irq_eframe;
	struct bt_info bt_local, *bt;
	struct machine_specific *ms;
	ulong last_process_stack_eframe;
	ulong user_mode_eframe;
	char *rip_symbol;

	/*
	 * User may have made a run-time switch.
	 */
	if (kt->flags & DWARF_UNWIND) {
		machdep->back_trace = x86_64_dwarf_back_trace_cmd;
		x86_64_dwarf_back_trace_cmd(bt_in);
		return;
	}

	/* work on a local copy; bt_in stays pristine for restarts below */
	bt = &bt_local;
	BCOPY(bt_in, bt, sizeof(struct bt_info));

	if (bt->flags & BT_FRAMESIZE_DEBUG) {
		x86_64_framesize_debug(bt);
		return;
	}

	level = 0;
	done = FALSE;
	irq_eframe = 0;
	last_process_stack_eframe = 0;
	bt->call_target = NULL;
	rsp = bt->stkptr;
	ms = machdep->machspec;

	/* reference checks print nothing; route output to the bit bucket */
	if (BT_REFERENCE_CHECK(bt))
		ofp = pc->nullfp;
	else
		ofp = fp;

	/* If rsp is in user stack, the memory may not be included in vmcore, and
	 * we only output the register's value. So it's not necessary to check
	 * whether it can be accessible.
	 */
	if (!(bt->flags & BT_USER_SPACE) && (!rsp || !accessible(rsp))) {
		error(INFO, "cannot determine starting stack pointer\n");
		if (KVMDUMP_DUMPFILE())
			kvmdump_display_regs(bt->tc->processor, ofp);
		else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE())
			diskdump_display_regs(bt->tc->processor, ofp);
		else if (SADUMP_DUMPFILE())
			sadump_display_regs(bt->tc->processor, ofp);
		return;
	}

	if (bt->flags & BT_TEXT_SYMBOLS) {
		if ((bt->flags & BT_USER_SPACE) &&
		    !(bt->flags & BT_TEXT_SYMBOLS_ALL))
			return;
		if (!(bt->flags & BT_TEXT_SYMBOLS_ALL))
			fprintf(ofp, "%sSTART: %s%s at %lx\n",
			    space(VADDR_PRLEN > 8 ? 14 : 6),
			    closest_symbol(bt->instptr),
			    STREQ(closest_symbol(bt->instptr), "thread_return") ?
			    " (schedule)" : "",
			    bt->instptr);
	} else if (bt->flags & BT_USER_SPACE) {
		/* task was in user mode: just show the saved registers */
		fprintf(ofp, " [exception RIP: user space]\n");
		if (KVMDUMP_DUMPFILE())
			kvmdump_display_regs(bt->tc->processor, ofp);
		else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE())
			diskdump_display_regs(bt->tc->processor, ofp);
		else if (SADUMP_DUMPFILE())
			sadump_display_regs(bt->tc->processor, ofp);
		else if (pc->flags2 & QEMU_MEM_DUMP_ELF)
			display_regs_from_elf_notes(bt->tc->processor, ofp);
		return;
	} else if ((bt->flags & BT_KERNEL_SPACE) &&
	    (KVMDUMP_DUMPFILE() ||
	     (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) ||
	     SADUMP_DUMPFILE() || (pc->flags2 & QEMU_MEM_DUMP_ELF))) {
		/* dump formats that carry per-cpu register notes */
		fprintf(ofp, " [exception RIP: ");
		if ((sp = value_search(bt->instptr, &offset))) {
			fprintf(ofp, "%s", sp->name);
			if (offset)
				fprintf(ofp, (*gdb_output_radix == 16) ?
					"+0x%lx" : "+%ld", offset);
		} else
			fprintf(ofp, "unknown or invalid address");
		fprintf(ofp, "]\n");
		if (KVMDUMP_DUMPFILE())
			kvmdump_display_regs(bt->tc->processor, ofp);
		else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE())
			diskdump_display_regs(bt->tc->processor, ofp);
		else if (SADUMP_DUMPFILE())
			sadump_display_regs(bt->tc->processor, ofp);
		else if (pc->flags2 & QEMU_MEM_DUMP_ELF)
			display_regs_from_elf_notes(bt->tc->processor, ofp);
	} else if (bt->flags & BT_START) {
		x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr);
		bt->flags &= ~BT_START;
		level++;
	}

	if ((estack = x86_64_in_exception_stack(bt, &estack_index))) {
in_exception_stack:
		bt->flags |= BT_EXCEPTION_STACK;
		/*
		 * The stack buffer will have been loaded with the process
		 * stack, so switch to the indicated exception stack.
		 */
		bt->stackbase = estack;
		bt->stacktop = estack + ms->stkinfo.esize[estack_index];
		bt->stackbuf = ms->irqstack;

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    bt->hp && (bt->hp->esp == bt->stkptr) ?
		    "irqstack contents via hook" : "irqstack contents",
		    RETURN_ON_ERROR))
			error(FATAL,
			    "read of exception stack at %lx failed\n",
				bt->stackbase);

		/*
		 * If irq_eframe is set, we've jumped back here from the
		 * IRQ stack dump below. Do basically the same thing as if
		 * had come from the processor stack, but presume that we
		 * must have been in kernel mode, i.e., took an exception
		 * while operating on an IRQ stack. (untested)
		 */
		if (irq_eframe) {
			bt->flags |= BT_EXCEPTION_FRAME;
			i = (irq_eframe - bt->stackbase)/sizeof(ulong);
			x86_64_print_stack_entry(bt, ofp, level, i,
				bt->instptr);
			bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
			cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
				bt->stackbuf + (irq_eframe - bt->stackbase),
				bt, ofp);
			rsp += SIZE(pt_regs); /* guaranteed kernel mode */
			if (bt->eframe_ip && ((framesize =
			    x86_64_get_framesize(bt, bt->eframe_ip, rsp)) >= 0))
				rsp += framesize;
			level++;
			irq_eframe = 0;
		}

		/* the hardware-pushed pt_regs sits at the top of the estack */
		stacktop = bt->stacktop - SIZE(pt_regs);
		/* nested-NMI kernels keep 12 extra saved words above pt_regs */
		if ((machdep->flags & NESTED_NMI) &&
		    estack_index == NMI_STACK)
			stacktop -= 12*sizeof(ulong);

		bt->flags &= ~BT_FRAMESIZE_DISABLE;

		for (i = (rsp - bt->stackbase)/sizeof(ulong);
		     !done && (rsp < stacktop);
		     i++, rsp += sizeof(ulong)) {

			up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);

			if (!is_kernel_text(*up))
				continue;

			switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
			{
			case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
				rsp += SIZE(pt_regs);
				i += SIZE(pt_regs)/sizeof(ulong);
				if (!bt->eframe_ip) {
					level++;
					break;
				} /* else fall through */
			case BACKTRACE_ENTRY_DISPLAYED:
				level++;
				/* skip the callee's frame when its size is known */
				if ((framesize = x86_64_get_framesize(bt,
				    bt->eframe_ip ?
				    bt->eframe_ip : *up, rsp)) >= 0) {
					rsp += framesize;
					i += framesize/sizeof(ulong);
				}
				break;
			case BACKTRACE_ENTRY_IGNORED:
				break;
			case BACKTRACE_COMPLETE:
				done = TRUE;
				break;
			}
		}

		cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
			bt->stackbuf + (stacktop - bt->stackbase),
			bt, ofp);

		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- <%s exception stack> ---\n",
				ms->stkinfo.exception_stacks[estack_index]);

		/*
		 * Find the CPU-saved, or handler-saved registers
		 */
		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
		up -= 5;
		if ((machdep->flags & NESTED_NMI) &&
		    estack_index == NMI_STACK &&
		    bt->stkptr <= bt->stacktop - 17*sizeof(ulong)) {
			up -= 12;
			/* Copied and saved regs are swapped in pre-3.8 kernels */
			if (*up == symbol_value("repeat_nmi"))
				up += 5;
		}

		/* Registers (as saved by CPU):
		 *
		 *  up[4]  SS
		 *  up[3]  RSP
		 *  up[2]  RFLAGS
		 *  up[1]  CS
		 *  up[0]  RIP
		 */
		rsp = bt->stkptr = up[3];
		bt->instptr = up[0];
		if (cs & 3)
			done = TRUE;   /* user-mode exception */
		else
			done = FALSE;  /* kernel-mode exception */
		bt->frameptr = 0;

		/*
		 * Print the return values from the estack end.
		 */
		if (!done) {
			bt->flags |= BT_START|BT_SAVE_EFRAME_IP;
			x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr);
			bt->flags &=
			    ~(BT_START|BT_SAVE_EFRAME_IP|BT_FRAMESIZE_DISABLE);
			/*
			 * Protect against exception stack recursion.
			 */
			if (x86_64_in_exception_stack(bt, NULL) == estack) {
				fprintf(ofp,
				    " [ %s exception stack recursion: "
				    "prior stack location overwritten ]\n",
					ms->stkinfo.exception_stacks[estack_index]);
				return;
			}
			level++;
			if ((framesize = x86_64_get_framesize(bt,
			    bt->instptr, rsp)) >= 0)
				rsp += framesize;
		}
	}

	/*
	 * IRQ stack entry always comes in via the process stack, regardless
	 * whether it happened while running in user or kernel space.
	 */
	if (!done && (irqstack = x86_64_in_irqstack(bt))) {
		bt->flags |= BT_IRQSTACK;
		/*
		 * Until coded otherwise, the stackbase will be pointing to
		 * either the exception stack or, more likely, the process
		 * stack base. Switch it to the IRQ stack.
		 */
		bt->stackbase = irqstack;
		bt->stacktop = irqstack + ms->stkinfo.isize;
		bt->stackbuf = ms->irqstack;

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
		    "irqstack contents via hook" : "irqstack contents",
		    RETURN_ON_ERROR))
			error(FATAL, "read of IRQ stack at %lx failed\n",
				bt->stackbase);

		stacktop = bt->stacktop - 64; /* from kernel code */

		bt->flags &= ~BT_FRAMESIZE_DISABLE;

		for (i = (rsp - bt->stackbase)/sizeof(ulong);
		     !done && (rsp < stacktop);
		     i++, rsp += sizeof(ulong)) {

			up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);

			if (!is_kernel_text(*up))
				continue;

			switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
			{
			case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
				rsp += SIZE(pt_regs);
				i += SIZE(pt_regs)/sizeof(ulong);
				if (!bt->eframe_ip) {
					level++;
					break;
				} /* else fall through */
			case BACKTRACE_ENTRY_DISPLAYED:
				level++;
				if ((framesize = x86_64_get_framesize(bt,
				    bt->eframe_ip ?
				    bt->eframe_ip : *up, rsp)) >= 0) {
					rsp += framesize;
					i += framesize/sizeof(ulong);
				}
				break;
			case BACKTRACE_ENTRY_IGNORED:
				break;
			case BACKTRACE_COMPLETE:
				done = TRUE;
				break;
			}
		}

		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- ---\n");

		/*
		 * stack = (unsigned long *) (irqstack_end[-1]);
		 * (where irqstack_end is 64 bytes below page end)
		 */
		up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
		up -= 1;
		irq_eframe = rsp = bt->stkptr =
		    x86_64_irq_eframe_link(*up, bt, ofp);
		up -= 1;
		bt->instptr = *up;
		/*
		 * No exception frame when coming from call_softirq.
		 */
		if ((sp = value_search(bt->instptr, &offset)) &&
		    STREQ(sp->name, "call_softirq"))
			irq_eframe = 0;
		bt->frameptr = 0;
		done = FALSE;
	} else
		irq_eframe = 0;

	/* the IRQ stack may itself have been entered from an exception stack */
	if (!done && (estack = x86_64_in_exception_stack(bt, &estack_index)))
		goto in_exception_stack;

	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
		/*
		 * Verify that the rsp pointer taken from either the
		 * exception or IRQ stack points into the process stack.
		 */
		bt->stackbase = GET_STACKBASE(bt->tc->task);
		bt->stacktop = GET_STACKTOP(bt->tc->task);

		if (!INSTACK(rsp, bt)) {
			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
			{
			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
					bt_in->stkptr, bt->stkptr, rsp,
					bt->stackbase);

			case BT_EXCEPTION_STACK:
				if (in_user_stack(bt->tc->task, rsp)) {
					done = TRUE;
					break;
				}
				if (STREQ(closest_symbol(bt->instptr),
				    "ia32_sysenter_target")) {
					/*
					 * RSP 0 from MSR_IA32_SYSENTER_ESP?
					 */
					if (rsp == 0)
						return;
					done = TRUE;
					break;
				}
				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
					bt_in->stkptr, rsp, bt->stackbase);

			case BT_IRQSTACK:
				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
					bt_in->stkptr, rsp, bt->stackbase);
			}
		}

		/*
		 * Now fill the local stack buffer from the process stack.
		 */
		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "irqstack contents", RETURN_ON_ERROR))
			error(FATAL, "read of process stack at %lx failed\n",
				bt->stackbase);
	}

	/*
	 * For a normally blocked task, hand-create the first level(s).
	 * associated with __schedule() and/or schedule().
	 */
	if (!done &&
	    !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) &&
	    (rip_symbol = closest_symbol(bt->instptr)) &&
	    (STREQ(rip_symbol, "thread_return") ||
	     STREQ(rip_symbol, "schedule") ||
	     STREQ(rip_symbol, "__schedule"))) {
		if (STREQ(rip_symbol, "__schedule")) {
			i = (rsp - bt->stackbase)/sizeof(ulong);
			x86_64_print_stack_entry(bt, ofp, level,
				i, bt->instptr);
			level++;
			rsp = __schedule_frame_adjust(rsp, bt);
			if (STREQ(closest_symbol(bt->instptr), "schedule"))
				bt->flags |= BT_SCHEDULE;
		} else
			bt->flags |= BT_SCHEDULE;

		if (bt->flags & BT_SCHEDULE) {
			i = (rsp - bt->stackbase)/sizeof(ulong);
			x86_64_print_stack_entry(bt, ofp, level,
				i, bt->instptr);
			bt->flags &= ~(ulonglong)BT_SCHEDULE;
			rsp += sizeof(ulong);
			level++;
		}
	}

	/*
	 * Dump the IRQ exception frame from the process stack.
	 * If the CS register indicates a user exception frame,
	 * then set done to TRUE to avoid the process stack walk-through.
	 * Otherwise, bump up the rsp past the kernel-mode eframe.
	 */
	if (irq_eframe) {
		bt->flags |= BT_EXCEPTION_FRAME;
		i = (irq_eframe - bt->stackbase)/sizeof(ulong);
		x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr);
		bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
		cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
		if (cs & 3)
			done = TRUE;   /* IRQ from user-mode */
		else {
			if (x86_64_print_eframe_location(rsp, level, ofp))
				level++;
			rsp += SIZE(pt_regs);
			irq_eframe = 0;
			bt->flags |= BT_EFRAME_TARGET;
			if (bt->eframe_ip && ((framesize =
			    x86_64_get_framesize(bt, bt->eframe_ip, rsp)) >= 0))
				rsp += framesize;
			bt->flags &= ~BT_EFRAME_TARGET;
		}
		level++;
	}

	/*
	 * Walk the process stack.
	 */
	bt->flags &= ~BT_FRAMESIZE_DISABLE;

	for (i = (rsp - bt->stackbase)/sizeof(ulong);
	     !done && (rsp < bt->stacktop);
	     i++, rsp += sizeof(ulong)) {

		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);

		if (!is_kernel_text(*up))
			continue;

		if ((bt->flags & BT_CHECK_CALLER)) {
			/*
			 * A non-zero offset value from the value_search()
			 * lets us know if it's a real text return address.
			 */
			if (!(spt = value_search(*up, &offset)))
				continue;

			if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE))
				continue;

			/*
			 * sp gets the syment of the function that the text
			 * routine above called before leaving its return
			 * address on the stack -- if it can be determined.
			 */
			sp = x86_64_function_called_by((*up)-5);

			if (sp == NULL) {
				/*
				 * We were unable to get the called function.
				 * If the text address had an offset, then
				 * it must have made an indirect call, and
				 * can't have called our target function.
				 */
				if (offset) {
					if (CRASHDEBUG(1))
						fprintf(ofp,
					"< ignoring %s() -- makes indirect call and NOT %s()>\n",
							spt->name,
							bt->call_target);
					continue;
				}
			} else if ((machdep->flags & SCHED_TEXT) &&
			    STREQ(bt->call_target, "schedule") &&
			    STREQ(sp->name, "__sched_text_start")) {
				;  /* bait and switch */
			} else if (!STREQ(sp->name, bt->call_target)) {
				/*
				 * We got function called by the text routine,
				 * but it's not our target function.
				 */
				if (CRASHDEBUG(2))
					fprintf(ofp,
				"< ignoring %s() -- calls %s() and NOT %s()>\n",
						spt->name, sp->name,
						bt->call_target);
				continue;
			}
		}

		switch (x86_64_print_stack_entry(bt, ofp, level, i,*up))
		{
		case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED:
			last_process_stack_eframe = rsp + 8;
			if (x86_64_print_eframe_location(
			    last_process_stack_eframe, level, ofp))
				level++;
			rsp += SIZE(pt_regs);
			i += SIZE(pt_regs)/sizeof(ulong);
			if (!bt->eframe_ip) {
				level++;
				break;
			} /* else fall through */
		case BACKTRACE_ENTRY_DISPLAYED:
			level++;
			if ((framesize = x86_64_get_framesize(bt,
			    bt->eframe_ip ? bt->eframe_ip : *up, rsp)) >= 0) {
				rsp += framesize;
				i += framesize/sizeof(ulong);
			}
			break;
		case BACKTRACE_ENTRY_IGNORED:
			break;
		case BACKTRACE_COMPLETE:
			done = TRUE;
			break;
		}
	}

	/* user tasks end with a user-mode pt_regs at the top of the stack */
	if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
	    (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
		if (last_process_stack_eframe < user_mode_eframe)
			x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
				(bt->stacktop - bt->stackbase) - SIZE(pt_regs),
				bt, ofp);
	}

	/* "bt -t -R": re-run the full trace for a matched reference */
	if (bt->flags & BT_TEXT_SYMBOLS) {
		if (BT_REFERENCE_FOUND(bt)) {
			print_task_header(fp, task_to_context(bt->task), 0);
			BCOPY(bt_in, bt, sizeof(struct bt_info));
			bt->ref = NULL;
			machdep->back_trace(bt);
			fprintf(fp, "\n");
		}
	}
}

/*
 * Use dwarf CFI encodings to correctly follow the call chain.
 */
/*
 * DWARF-based counterpart of x86_64_low_budget_back_trace_cmd(): same
 * exception-stack -> IRQ-stack -> process-stack transition logic, but the
 * frame-by-frame walk within each stack is delegated to dwarf_backtrace().
 */
static void
x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in)
{
	int i, level, done, estack_index;
	ulong rsp, offset, stacktop;
	ulong *up;
	long cs;
	struct syment *sp;
	FILE *ofp;
	ulong estack, irqstack;
	ulong irq_eframe;
	struct bt_info bt_local, *bt;
	struct machine_specific *ms;
	ulong last_process_stack_eframe;
	ulong user_mode_eframe;

	/*
	 * User may have made a run-time switch.
	 */
	if (!(kt->flags & DWARF_UNWIND)) {
		machdep->back_trace = x86_64_low_budget_back_trace_cmd;
		x86_64_low_budget_back_trace_cmd(bt_in);
		return;
	}

	/* work on a local copy; bt_in stays pristine for restarts below */
	bt = &bt_local;
	BCOPY(bt_in, bt, sizeof(struct bt_info));

	if (bt->flags & BT_FRAMESIZE_DEBUG) {
		dwarf_debug(bt);
		return;
	}

	level = 0;
	done = FALSE;
	irq_eframe = 0;
	last_process_stack_eframe = 0;
	bt->call_target = NULL;
	bt->bptr = 0;
	rsp = bt->stkptr;
	if (!rsp) {
		error(INFO, "cannot determine starting stack pointer\n");
		return;
	}
	ms = machdep->machspec;
	/* reference checks print nothing; route output to the bit bucket */
	if (BT_REFERENCE_CHECK(bt))
		ofp = pc->nullfp;
	else
		ofp = fp;

	if (bt->flags & BT_TEXT_SYMBOLS) {
		if (!(bt->flags & BT_TEXT_SYMBOLS_ALL))
			fprintf(ofp, "%sSTART: %s%s at %lx\n",
			    space(VADDR_PRLEN > 8 ? 14 : 6),
			    closest_symbol(bt->instptr),
			    STREQ(closest_symbol(bt->instptr), "thread_return") ?
			    " (schedule)" : "",
			    bt->instptr);
	} else if (bt->flags & BT_START) {
		x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr);
		bt->flags &= ~BT_START;
		level++;
	}

	if ((estack = x86_64_in_exception_stack(bt, &estack_index))) {
in_exception_stack:
		bt->flags |= BT_EXCEPTION_STACK;
		/*
		 * The stack buffer will have been loaded with the process
		 * stack, so switch to the indicated exception stack.
		 */
		bt->stackbase = estack;
		bt->stacktop = estack + ms->stkinfo.esize[estack_index];
		bt->stackbuf = ms->irqstack;

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    bt->hp && (bt->hp->esp == bt->stkptr) ?
		    "irqstack contents via hook" : "irqstack contents",
		    RETURN_ON_ERROR))
			error(FATAL,
			    "read of exception stack at %lx failed\n",
				bt->stackbase);

		/*
		 * If irq_eframe is set, we've jumped back here from the
		 * IRQ stack dump below. Do basically the same thing as if
		 * had come from the processor stack, but presume that we
		 * must have been in kernel mode, i.e., took an exception
		 * while operating on an IRQ stack. (untested)
		 */
		if (irq_eframe) {
			bt->flags |= BT_EXCEPTION_FRAME;
			i = (irq_eframe - bt->stackbase)/sizeof(ulong);
			x86_64_print_stack_entry(bt, ofp, level, i,
				bt->instptr);
			bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
			cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
				bt->stackbuf + (irq_eframe - bt->stackbase),
				bt, ofp);
			rsp += SIZE(pt_regs); /* guaranteed kernel mode */
			level++;
			irq_eframe = 0;
		}

		/* the hardware-pushed pt_regs sits at the top of the estack */
		stacktop = bt->stacktop - SIZE(pt_regs);
		/* nested-NMI kernels keep 12 extra saved words above pt_regs */
		if ((machdep->flags & NESTED_NMI) &&
		    estack_index == NMI_STACK)
			stacktop -= 12*sizeof(ulong);

		/* let the DWARF unwinder walk this exception stack */
		if (!done) {
			level = dwarf_backtrace(bt, level, stacktop);
			done = TRUE;
		}

		cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
			bt->stackbuf + (stacktop - bt->stackbase),
			bt, ofp);

		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- ---\n");

		/*
		 * Find the CPU-saved, or handler-saved registers
		 */
		up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
		up -= 5;
		if ((machdep->flags & NESTED_NMI) &&
		    estack_index == NMI_STACK &&
		    bt->stkptr <= bt->stacktop - 17*sizeof(ulong)) {
			up -= 12;
			/* Copied and saved regs are swapped in pre-3.8 kernels */
			if (*up == symbol_value("repeat_nmi"))
				up += 5;
		}

		/* Registers (as saved by CPU):
		 *
		 *  up[4]  SS
		 *  up[3]  RSP
		 *  up[2]  RFLAGS
		 *  up[1]  CS
		 *  up[0]  RIP
		 */
		rsp = bt->stkptr = up[3];
		bt->instptr = up[0];
		if (cs & 3)
			done = TRUE;   /* user-mode exception */
		else
			done = FALSE;  /* kernel-mode exception */
		bt->frameptr = 0;

		/*
		 * Print the return values from the estack end.
		 */
		if (!done) {
			bt->flags |= BT_START;
			x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr);
			bt->flags &= ~BT_START;
			level++;
		}
	}

	/*
	 * IRQ stack entry always comes in via the process stack, regardless
	 * whether it happened while running in user or kernel space.
	 */
	if (!done && (irqstack = x86_64_in_irqstack(bt))) {
		bt->flags |= BT_IRQSTACK;
		/*
		 * Until coded otherwise, the stackbase will be pointing to
		 * either the exception stack or, more likely, the process
		 * stack base. Switch it to the IRQ stack.
		 */
		bt->stackbase = irqstack;
		bt->stacktop = irqstack + ms->stkinfo.isize;
		bt->stackbuf = ms->irqstack;

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
		    "irqstack contents via hook" : "irqstack contents",
		    RETURN_ON_ERROR))
			error(FATAL, "read of IRQ stack at %lx failed\n",
				bt->stackbase);

		stacktop = bt->stacktop - 64; /* from kernel code */

		/* let the DWARF unwinder walk the IRQ stack */
		if (!done) {
			level = dwarf_backtrace(bt, level, stacktop);
			done = TRUE;
		}

		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- ---\n");

		/*
		 * stack = (unsigned long *) (irqstack_end[-1]);
		 * (where irqstack_end is 64 bytes below page end)
		 */
		up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
		up -= 1;
		irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link;
		up -= 1;
		bt->instptr = *up;
		/*
		 * No exception frame when coming from call_softirq.
		 */
		if ((sp = value_search(bt->instptr, &offset)) &&
		    STREQ(sp->name, "call_softirq"))
			irq_eframe = 0;
		bt->frameptr = 0;
		done = FALSE;
	} else
		irq_eframe = 0;

	/* the IRQ stack may itself have been entered from an exception stack */
	if (!done && (estack = x86_64_in_exception_stack(bt, &estack_index)))
		goto in_exception_stack;

	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
		/*
		 * Verify that the rsp pointer taken from either the
		 * exception or IRQ stack points into the process stack.
		 */
		bt->stackbase = GET_STACKBASE(bt->tc->task);
		bt->stacktop = GET_STACKTOP(bt->tc->task);

		if (!INSTACK(rsp, bt)) {
			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
			{
			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
					bt_in->stkptr, bt->stkptr, rsp,
					bt->stackbase);
			case BT_EXCEPTION_STACK:
				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
					bt_in->stkptr, rsp, bt->stackbase);
			case BT_IRQSTACK:
				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
					bt_in->stkptr, rsp, bt->stackbase);
			}
		}

		/*
		 * Now fill the local stack buffer from the process stack.
		 */
		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "irqstack contents", RETURN_ON_ERROR))
			error(FATAL, "read of process stack at %lx failed\n",
				bt->stackbase);
	}

	/*
	 * Dump the IRQ exception frame from the process stack.
	 * If the CS register indicates a user exception frame,
	 * then set done to TRUE to avoid the process stack walk-through.
	 * Otherwise, bump up the rsp past the kernel-mode eframe.
	 */
	if (irq_eframe) {
		bt->flags |= BT_EXCEPTION_FRAME;
		level = dwarf_print_stack_entry(bt, level);
		bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
		cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
		if (cs & 3)
			done = TRUE;   /* IRQ from user-mode */
		else {
			if (x86_64_print_eframe_location(rsp, level, ofp))
				level++;
			rsp += SIZE(pt_regs);
			irq_eframe = 0;
		}
		level++;
	}

	/*
	 * Walk the process stack.
	 */
	if (!done) {
		level = dwarf_backtrace(bt, level, bt->stacktop);
		done = TRUE;
	}

	/* user tasks end with a user-mode pt_regs at the top of the stack */
	if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
	    (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
		if (last_process_stack_eframe < user_mode_eframe)
			x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
				(bt->stacktop - bt->stackbase) - SIZE(pt_regs),
				bt, ofp);
	}

	/* "bt -t -R": re-run the full trace for a matched reference */
	if (bt->flags & BT_TEXT_SYMBOLS) {
		if (BT_REFERENCE_FOUND(bt)) {
			print_task_header(fp, task_to_context(bt->task), 0);
			BCOPY(bt_in, bt, sizeof(struct bt_info));
			bt->ref = NULL;
			machdep->back_trace(bt);
			fprintf(fp, "\n");
		}
	}
}

/*
 * Functions that won't be called indirectly.
 * Add more to this as they are discovered.
 */
static const char *direct_call_targets[] = {
	"schedule",
	"schedule_timeout",
	NULL
};

/*
 * TRUE if bt->call_target is one of the direct_call_targets[] above and
 * caller-verification (BT_CHECK_CALLER) may therefore be enabled.  An
 * active task may be anywhere, so scheduler targets are exempted.
 */
static int
is_direct_call_target(struct bt_info *bt)
{
	int i;

	if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER))
		return FALSE;

	if (strstr(bt->call_target, "schedule") &&
	    is_task_active(bt->task))
		return FALSE;

	for (i = 0; direct_call_targets[i]; i++) {
		if (STREQ(direct_call_targets[i], bt->call_target))
			return TRUE;
	}

	return FALSE;
}

/*
 * Given the presumed address of a 5-byte "call" instruction (callers pass
 * return-address minus 5), return the syment of the called function, or
 * NULL if the byte is not an 0xe8 direct-call opcode or the target cannot
 * be extracted from gdb's disassembly.
 */
static struct syment *
x86_64_function_called_by(ulong rip)
{
	struct syment *sp;
	char buf[BUFSIZE], *p1;
	ulong value, offset;
	unsigned char byte;

	value = 0;
	sp = NULL;

	if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte",
	    QUIET|RETURN_ON_ERROR))
		return sp;

	/* 0xe8 is the direct near-call opcode; anything else disqualifies */
	if (byte != 0xe8)
		return sp;

	sprintf(buf, "x/i 0x%lx", rip);

	/* let gdb disassemble the instruction and parse the callq target */
	open_tmpfile2();
	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
		rewind(pc->tmpfile2);
		while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
			if ((p1 = strstr(buf, "callq")) &&
			    whitespace(*(p1-1))) {
				if (extract_hex(p1, &value, NULLCHAR, TRUE))
					break;
			}
		}
	}
	close_tmpfile2();

	if (value)
		sp = value_search(value, &offset);

	/*
	 * Functions that jmp to schedule() or schedule_timeout().
*/ if (sp) { if ((STREQ(sp->name, "schedule_timeout_interruptible") || STREQ(sp->name, "schedule_timeout_uninterruptible"))) sp = symbol_search("schedule_timeout"); if (STREQ(sp->name, "__cond_resched")) sp = symbol_search("schedule"); } return sp; } /* * Unroll the kernel stack using a minimal amount of gdb services. */ static void x86_64_back_trace(struct gnu_request *req, struct bt_info *bt) { error(FATAL, "x86_64_back_trace: unused\n"); } /* * Print exception frame information for x86_64. * * Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp * RIP: 0010:[] {default_idle+36} * RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246 * RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018 * RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400 * RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000 * R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000 * R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 * FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0 * CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b * CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0 * */ long x86_64_exception_frame(ulong flags, ulong kvaddr, char *local, struct bt_info *bt, FILE *ofp) { long rip, rsp, cs, ss, rflags, orig_rax, rbp; long rax, rbx, rcx, rdx, rsi, rdi; long r8, r9, r10, r11, r12, r13, r14, r15; struct machine_specific *ms; struct syment *sp; ulong offset; char *pt_regs_buf; long verified; long err; char buf[BUFSIZE]; ms = machdep->machspec; sp = NULL; if (!(machdep->flags & PT_REGS_INIT) || (flags == EFRAME_INIT)) { err = 0; err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) == INVALID_OFFSET); err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) == INVALID_OFFSET); err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) == INVALID_OFFSET); err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) == INVALID_OFFSET); err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) 
== INVALID_OFFSET); err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) == INVALID_OFFSET); err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) == INVALID_OFFSET); err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) == INVALID_OFFSET); err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) == INVALID_OFFSET); err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) == INVALID_OFFSET); /* * x86/x86_64 merge changed traditional register names. */ if (((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) == INVALID_OFFSET) && ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "bp")) == INVALID_OFFSET)) err++; if (((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) == INVALID_OFFSET) && ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "ax")) == INVALID_OFFSET)) err++; if (((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) == INVALID_OFFSET) && ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "bx")) == INVALID_OFFSET)) err++; if (((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) == INVALID_OFFSET) && ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "cx")) == INVALID_OFFSET)) err++; if (((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) == INVALID_OFFSET) && ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "dx")) == INVALID_OFFSET)) err++; if (((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) == INVALID_OFFSET) && ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "si")) == INVALID_OFFSET)) err++; if (((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) == INVALID_OFFSET) && ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "di")) == INVALID_OFFSET)) err++; if (((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) == INVALID_OFFSET) && ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "ip")) == INVALID_OFFSET)) err++; if (((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) == INVALID_OFFSET) && ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "sp")) == INVALID_OFFSET)) err++; if (((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) == INVALID_OFFSET) && ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "flags")) == INVALID_OFFSET)) err++; if (((ms->pto.orig_rax = 
MEMBER_OFFSET("pt_regs", "orig_rax")) == INVALID_OFFSET) && ((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_ax")) == INVALID_OFFSET)) err++; if (err) error(WARNING, "pt_regs structure has changed\n"); machdep->flags |= PT_REGS_INIT; if (flags == EFRAME_INIT) return err; } if (kvaddr) { pt_regs_buf = GETBUF(SIZE(pt_regs)); readmem(kvaddr, KVADDR, pt_regs_buf, SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR); } else pt_regs_buf = local; rip = ULONG(pt_regs_buf + ms->pto.rip); rsp = ULONG(pt_regs_buf + ms->pto.rsp); cs = ULONG(pt_regs_buf + ms->pto.cs); ss = ULONG(pt_regs_buf + ms->pto.ss); rflags = ULONG(pt_regs_buf + ms->pto.eflags); orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax); rbp = ULONG(pt_regs_buf + ms->pto.rbp); rax = ULONG(pt_regs_buf + ms->pto.rax); rbx = ULONG(pt_regs_buf + ms->pto.rbx); rcx = ULONG(pt_regs_buf + ms->pto.rcx); rdx = ULONG(pt_regs_buf + ms->pto.rdx); rsi = ULONG(pt_regs_buf + ms->pto.rsi); rdi = ULONG(pt_regs_buf + ms->pto.rdi); r8 = ULONG(pt_regs_buf + ms->pto.r8); r9 = ULONG(pt_regs_buf + ms->pto.r9); r10 = ULONG(pt_regs_buf + ms->pto.r10); r11 = ULONG(pt_regs_buf + ms->pto.r11); r12 = ULONG(pt_regs_buf + ms->pto.r12); r13 = ULONG(pt_regs_buf + ms->pto.r13); r14 = ULONG(pt_regs_buf + ms->pto.r14); r15 = ULONG(pt_regs_buf + ms->pto.r15); verified = x86_64_eframe_verify(bt, kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase, cs, ss, rip, rsp, rflags); /* * If it's print-if-verified request, don't print bogus eframes. */ if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) == (EFRAME_VERIFY|EFRAME_PRINT))) flags &= ~EFRAME_PRINT; else if (CRASHDEBUG(1) && verified && (flags != EFRAME_VERIFY)) fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase); if (flags & EFRAME_PRINT) { if (flags & EFRAME_SEARCH) { fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n", cs & 3 ? "USER" : "KERNEL", kvaddr ? 
kvaddr : (local - bt->stackbuf) + bt->stackbase); if (!(cs & 3)) { fprintf(ofp, " [exception RIP: "); if ((sp = value_search(rip, &offset))) { fprintf(ofp, "%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld", offset); } else fprintf(ofp, "unknown or invalid address"); fprintf(ofp, "]\n"); } } else if (!(cs & 3)) { fprintf(ofp, " [exception RIP: "); if ((sp = value_search(rip, &offset))) { fprintf(ofp, "%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld", offset); bt->eframe_ip = rip; } else fprintf(ofp, "unknown or invalid address"); fprintf(ofp, "]\n"); } fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n", rip, rsp, rflags); fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n", rax, rbx, rcx); fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n", rdx, rsi, rdi); fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n", rbp, r8, r9); fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n", r10, r11, r12); fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n", r13, r14, r15); fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n", orig_rax, cs, ss); if (!(cs & 3) && sp && (bt->flags & BT_LINE_NUMBERS)) { get_line_number(rip, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (!verified && CRASHDEBUG((pc->flags & RUNTIME) ? 
		    0 : 1))
			error(WARNING, "possibly bogus exception frame\n");
	}

	/* For "bt -R <ref>": feed every saved register through the
	 * backtrace reference checker. */
	if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) {
		x86_64_do_bt_reference_check(bt, rip, NULL);
		x86_64_do_bt_reference_check(bt, rsp, NULL);
		x86_64_do_bt_reference_check(bt, cs, NULL);
		x86_64_do_bt_reference_check(bt, ss, NULL);
		x86_64_do_bt_reference_check(bt, rflags, NULL);
		x86_64_do_bt_reference_check(bt, orig_rax, NULL);
		x86_64_do_bt_reference_check(bt, rbp, NULL);
		x86_64_do_bt_reference_check(bt, rax, NULL);
		x86_64_do_bt_reference_check(bt, rbx, NULL);
		x86_64_do_bt_reference_check(bt, rcx, NULL);
		x86_64_do_bt_reference_check(bt, rdx, NULL);
		x86_64_do_bt_reference_check(bt, rsi, NULL);
		x86_64_do_bt_reference_check(bt, rdi, NULL);
		x86_64_do_bt_reference_check(bt, r8, NULL);
		x86_64_do_bt_reference_check(bt, r9, NULL);
		x86_64_do_bt_reference_check(bt, r10, NULL);
		x86_64_do_bt_reference_check(bt, r11, NULL);
		x86_64_do_bt_reference_check(bt, r12, NULL);
		x86_64_do_bt_reference_check(bt, r13, NULL);
		x86_64_do_bt_reference_check(bt, r14, NULL);
		x86_64_do_bt_reference_check(bt, r15, NULL);
	}

	/* Remember the rip and rsp for unwinding the process stack */
	if (kt->flags & DWARF_UNWIND){
		bt->instptr = rip;
		bt->stkptr = rsp;
		bt->bptr = rbp;
	}

	if (kvaddr)
		FREEBUF(pt_regs_buf);

	/* Caller selects the return value via flags. */
	if (flags & EFRAME_CS)
		return cs;
	else if (flags & EFRAME_VERIFY)
		return verified;

	return 0;
}

/*
 * Stubbed out: always reports FALSE so the caller prints the frame
 * location itself.  The dormant implementation below (#ifdef NOTDEF)
 * would look up the eframe's RIP symbol and print a one-line summary.
 */
static int
x86_64_print_eframe_location(ulong eframe, int level, FILE *ofp)
{
	return FALSE;

#ifdef NOTDEF
	ulong rip;
	char *pt_regs_buf;
	struct machine_specific *ms;
	struct syment *sp;

	ms = machdep->machspec;
	pt_regs_buf = GETBUF(SIZE(pt_regs));
	if (!readmem(eframe, KVADDR, pt_regs_buf, SIZE(pt_regs),
	    "pt_regs", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(pt_regs_buf);
		return FALSE;
	}

	rip = ULONG(pt_regs_buf + ms->pto.rip);
	FREEBUF(pt_regs_buf);

	if (!(sp = value_search(rip, NULL)))
		return FALSE;

	fprintf(ofp, "%s#%d [%8lx] %s at %lx\n",
		level < 10 ? " " : "", level+1, eframe, sp->name, rip);

	return TRUE;
#endif
}

/*
 * Check whether an RIP is in the FIXMAP vsyscall page.
 */
static int
is_vsyscall_addr(ulong rip)
{
	ulong page;

	/* vsyscall_page may be 0 if never initialized for this kernel. */
	if ((page = machdep->machspec->vsyscall_page))
		if ((rip >= page) && (rip < (page+PAGESIZE())))
			return TRUE;

	return FALSE;
}

/*
 * Resolve a virtual address to a symbol; vsyscall addresses are tried
 * against the base kernel symbol list first, everything else falls
 * through to the generic resolver.
 */
struct syment *
x86_64_value_to_symbol(ulong vaddr, ulong *offset)
{
	struct syment *sp;

	if (is_vsyscall_addr(vaddr) &&
	    (sp = value_search_base_kernel(vaddr, offset)))
		return sp;

	return generic_machdep_value_to_symbol(vaddr, offset);
}

/*
 * Check that the verifiable registers contain reasonable data.
 */
#define RAZ_MASK 0xffffffffffc08028    /* return-as-zero bits */

static int
x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss,
	long rip, long rsp, long rflags)
{
	int estack;
	struct syment *sp;
	ulong offset, exception;

	/* RFLAGS must have all reserved bits clear and bit 1 set. */
	if ((rflags & RAZ_MASK) || !(rflags & 0x2))
		return FALSE;

	/* cs 0x10 / ss 0x18: kernel-mode segment selectors
	 * (NOTE(review): presumably __KERNEL_CS/__KERNEL_DS — the
	 * constants match the traditional Linux x86_64 GDT layout). */
	if ((cs == 0x10) && (ss == 0x18)) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp))
			return TRUE;
		if (x86_64_is_module_addr(rip) &&
		    IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs))))
			return TRUE;
		if (is_kernel_text(rip) &&
		    (bt->flags & BT_EXCEPTION_STACK) &&
		    in_user_stack(bt->tc->task, rsp))
			return TRUE;
		if (is_kernel_text(rip) && !IS_KVADDR(rsp) &&
		    (bt->flags & BT_EFRAME_SEARCH) &&
		    x86_64_in_exception_stack(bt, NULL))
			return TRUE;
		if (is_kernel_text(rip) &&
		    x86_64_in_exception_stack(bt, &estack) &&
		    (estack <= 1))
			return TRUE;

		/*
		 * RSP may be 0 from MSR_IA32_SYSENTER_ESP.
		 */
		if (STREQ(closest_symbol(rip), "ia32_sysenter_target"))
			return TRUE;

		if ((rip == 0) && INSTACK(rsp, bt) &&
		    STREQ(bt->call_target, "ret_from_fork"))
			return TRUE;

		/* The exception type word stored just below the frame
		 * may identify a page fault. */
		if (readmem(kvaddr - 8, KVADDR, &exception, sizeof(ulong),
		    "exception type", RETURN_ON_ERROR|QUIET) &&
		    (sp = value_search(exception, &offset)) &&
		    STREQ(sp->name, "page_fault"))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs) + 8)))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs))))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    x86_64_in_exception_stack(bt, NULL))
			return TRUE;
	}

	/* cs 0x33 / ss 0x2b: 64-bit user-mode selectors. */
	if ((cs == 0x33) && (ss == 0x2b)) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
		if (is_vsyscall_addr(rip) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	/* Xen guests use ring-deprivileged selector variants. */
	if (XEN() && ((cs == 0x33) || (cs == 0xe033)) &&
	    ((ss == 0x2b) || (ss == 0xe02b))) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	if (XEN() && ((cs == 0x10000e030) || (cs == 0xe030)) &&
	    (ss == 0xe02b)) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp))
			return TRUE;
	}

	/*
	 * 32-bit segments
	 */
	if ((cs == 0x23) && (ss == 0x2b)) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	return FALSE;
}

/*
 * Get a stack frame combination of pc and ra from the most relevent spot.
 */
static void
x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	/* Dumpfile search mode delegates to the heuristic scanner. */
	if (bt->flags & BT_DUMPFILE_SEARCH)
		return x86_64_get_dumpfile_stack_frame(bt, pcp, spp);

	if (pcp)
		*pcp = x86_64_get_pc(bt);
	if (spp)
		*spp = x86_64_get_sp(bt);
}

/*
 * Get the starting point for the active cpus in a diskdump/netdump.
 */
static void
x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp)
{
	int panic_task;
	int i, j, estack, panic, stage, in_nmi_stack;
	char *sym;
	struct syment *sp;
	ulong *up, *up2;
	struct bt_info bt_local, *bt;
	struct machine_specific *ms;
	char *user_regs;
	ulong ur_rip, ur_rsp;
	ulong halt_rip, halt_rsp;
	ulong crash_kexec_rip, crash_kexec_rsp;
	ulong call_function_rip, call_function_rsp;
	ulong sysrq_c_rip, sysrq_c_rsp;
	ulong notify_die_rip, notify_die_rsp;

#define STACKTOP_INDEX(BT) (((BT)->stacktop - (BT)->stackbase)/sizeof(ulong))

	/* Work on a local copy; only deliberate flag updates are
	 * propagated back through bt_in. */
	bt = &bt_local;
	BCOPY(bt_in, bt, sizeof(struct bt_info));
	ms = machdep->machspec;
	ur_rip = ur_rsp = 0;
	halt_rip = halt_rsp = 0;
	crash_kexec_rip = crash_kexec_rsp = 0;
	call_function_rip = call_function_rsp = 0;
	notify_die_rsp = notify_die_rip = 0;
	sysrq_c_rip = sysrq_c_rsp = 0;
	in_nmi_stack = stage = 0;
	estack = -1;
	panic = FALSE;
	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;

	/* If the dump header supplied register notes, try those first. */
	if (panic_task && bt->machdep) {
		user_regs = bt->machdep;

		if (x86_64_eframe_verify(bt, 0,
		    ULONG(user_regs + OFFSET(user_regs_struct_cs)),
		    ULONG(user_regs + OFFSET(user_regs_struct_ss)),
		    ULONG(user_regs + OFFSET(user_regs_struct_rip)),
		    ULONG(user_regs + OFFSET(user_regs_struct_rsp)),
		    ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) {
			bt->stkptr = ULONG(user_regs +
				OFFSET(user_regs_struct_rsp));
			if (x86_64_in_irqstack(bt)) {
				ur_rip = ULONG(user_regs +
					OFFSET(user_regs_struct_rip));
				ur_rsp = ULONG(user_regs +
					OFFSET(user_regs_struct_rsp));
				goto skip_stage;
			}
		}
	} else if (ELF_NOTES_VALID() && bt->machdep) {
		user_regs = bt->machdep;
		ur_rip = ULONG(user_regs + OFFSET(user_regs_struct_rip));
		ur_rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp));
	}

	/*
	 * Check the process stack first.  The same scan is re-entered
	 * (via "goto next_stack") for the IRQ stack (stage 1) and each
	 * exception stack, looking for return addresses that identify
	 * the crash/dump entry path.
	 */
next_stack:
	for (i = 0, up = (ulong *)bt->stackbuf;
	     i < STACKTOP_INDEX(bt); i++, up++) {
		sym = closest_symbol(*up);

		if (XEN_CORE_DUMPFILE()) {
			if (STREQ(sym, "crash_kexec")) {
				sp = x86_64_function_called_by((*up)-5);
				if (sp && STREQ(sp->name, "machine_kexec")) {
					*rip = *up;
					*rsp = bt->stackbase +
					    ((char *)(up) - bt->stackbuf);
					return;
				}
			}
			if (STREQ(sym, "xen_machine_kexec")) {
				*rip = *up;
				*rsp = bt->stackbase +
				    ((char *)(up) - bt->stackbuf);
				return;
			}
		} else if (STREQ(sym, "netconsole_netdump") ||
		    STREQ(sym, "netpoll_start_netdump") ||
		    STREQ(sym, "start_disk_dump") ||
		    STREQ(sym, "disk_dump") ||
		    STREQ(sym, "crash_kexec") ||
		    STREQ(sym, "machine_kexec") ||
		    STREQ(sym, "try_crashdump")) {
			if (STREQ(sym, "crash_kexec")) {
				sp = x86_64_function_called_by((*up)-5);
				if (sp && STREQ(sp->name, "machine_kexec")) {
					*rip = *up;
					*rsp = bt->stackbase +
					    ((char *)(up) - bt->stackbuf);
					return;
				}
			}
			/*
			 * Use second instance of crash_kexec if it exists.
			 */
			if (!(bt->flags & BT_TEXT_SYMBOLS) &&
			    STREQ(sym, "crash_kexec") && !crash_kexec_rip) {
				crash_kexec_rip = *up;
				crash_kexec_rsp = bt->stackbase +
				    ((char *)(up) - bt->stackbuf);
				continue;
			}
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			return;
		}

		/* Only meaningful while scanning an exception stack
		 * (estack >= 0): detect a die_nmi-invoked watchdog. */
		if ((estack >= 0) &&
		    (STREQ(sym, "nmi_watchdog_tick") ||
		     STREQ(sym, "default_do_nmi"))) {
			sp = x86_64_function_called_by((*up)-5);
			if (!sp || !STREQ(sp->name, "die_nmi"))
				continue;
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			bt_in->flags |= BT_START;
			*rip = symbol_value("die_nmi");
			*rsp = (*rsp) - (7*sizeof(ulong));
			return;
		}

		if (STREQ(sym, "panic")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			panic = TRUE;
			continue;	/* keep looking for die */
		}

		if (STREQ(sym, "die")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			/* Prefer a later sysrq_handle_crash if present. */
			j = i;
			up2 = up;
			for (j++, up2++; j < STACKTOP_INDEX(bt); j++, up2++) {
				sym = closest_symbol(*up2);
				if (STREQ(sym, "sysrq_handle_crash"))
					goto next_sysrq;
			}
			return;
		}

		if (STREQ(sym, "sysrq_handle_crash")) {
			j = i;
			up2 = up;
next_sysrq:
			/* Always remember the LAST sysrq_handle_crash hit. */
			sysrq_c_rip = *up2;
			sysrq_c_rsp = bt->stackbase +
			    ((char *)(up2) - bt->stackbuf);
			pc->flags |= SYSRQ;
			for (j++, up2++; j < STACKTOP_INDEX(bt); j++, up2++) {
				sym = closest_symbol(*up2);
				if (STREQ(sym, "sysrq_handle_crash"))
					goto next_sysrq;
			}
		}

		if (!panic_task && (stage > 0) &&
		    (STREQ(sym, "smp_call_function_interrupt") ||
		     STREQ(sym, "stop_this_cpu"))) {
			call_function_rip = *up;
			call_function_rsp = bt->stackbase +
			    ((char *)(up) - bt->stackbuf);
		}

		if (!panic_task && STREQ(sym, "crash_nmi_callback")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			return;
		}

		if (!panic_task && in_nmi_stack &&
		    (pc->flags2 & VMCOREINFO) && STREQ(sym, "notify_die")) {
			notify_die_rip = *up;
			notify_die_rsp = bt->stackbase +
			    ((char *)(up) - bt->stackbuf);
		}

		/* Idle-loop fallbacks for non-panicking swapper tasks,
		 * only while still scanning the process stack (stage 0). */
		if (XEN_CORE_DUMPFILE() && !panic_task &&
		    (bt->tc->pid == 0) && (stage == 0) &&
		    STREQ(sym, "safe_halt")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}

		if (XEN_CORE_DUMPFILE() && !panic_task &&
		    (bt->tc->pid == 0) && !halt_rip && (stage == 0) &&
		    STREQ(sym, "xen_idle")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}

		if (!XEN_CORE_DUMPFILE() && !panic_task &&
		    (bt->tc->pid == 0) && !halt_rip && (stage == 0) &&
		    STREQ(sym, "cpu_idle")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}
	}

	if (panic)
		return;

	if (crash_kexec_rip) {
		*rip = crash_kexec_rip;
		*rsp = crash_kexec_rsp;
		return;
	}

skip_stage:
	switch (stage)
	{
	/*
	 * Now check the processor's interrupt stack.
	 */
	case 0:
		bt->stackbase = ms->stkinfo.ibase[bt->tc->processor];
		bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] +
			ms->stkinfo.isize;
		console("x86_64_get_dumpfile_stack_frame: searching IRQ stack at %lx\n",
			bt->stackbase);
		bt->stackbuf = ms->irqstack;
		alter_stackbuf(bt);
		stage = 1;
		goto next_stack;

	/*
	 * Check the exception stacks.
	 */
	case 1:
		if (++estack == MAX_EXCEPTION_STACKS)
			break;
		bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][estack];
		bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][estack] +
			ms->stkinfo.esize[estack];
		console("x86_64_get_dumpfile_stack_frame: searching %s estack at %lx\n",
			ms->stkinfo.exception_stacks[estack], bt->stackbase);
		if (!(bt->stackbase))
			goto skip_stage;
		bt->stackbuf = ms->irqstack;
		alter_stackbuf(bt);
		in_nmi_stack = STREQ(ms->stkinfo.exception_stacks[estack], "NMI");
		goto next_stack;
	}

	if (sysrq_c_rip) {
		*rip = sysrq_c_rip;
		*rsp = sysrq_c_rsp;
		return;
	}

	if (notify_die_rip) {
		*rip = notify_die_rip;
		*rsp = notify_die_rsp;
		return;
	}

	/*
	 * We didn't find what we were looking for, so just use what was
	 * passed in from the ELF header.
	 */
	if (ur_rip && ur_rsp) {
		*rip = ur_rip;
		*rsp = ur_rsp;
		if (is_kernel_text(ur_rip) &&
		    (INSTACK(ur_rsp, bt_in) ||
		     in_alternate_stack(bt->tc->processor, ur_rsp)))
			bt_in->flags |= BT_KERNEL_SPACE;
		if (!is_kernel_text(ur_rip) &&
		    in_user_stack(bt->tc->task, ur_rsp))
			bt_in->flags |= BT_USER_SPACE;
		return;
	}

	if (call_function_rip && call_function_rsp) {
		*rip = call_function_rip;
		*rsp = call_function_rsp;
		return;
	}

	if (halt_rip && halt_rsp) {
		*rip = halt_rip;
		*rsp = halt_rsp;
		if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE())
			bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
		return;
	}

	/*
	 * Use what was (already) saved in the panic task's
	 * registers found in the ELF header.
	 */
	if (bt->flags & BT_KDUMP_ELF_REGS) {
		user_regs = bt->machdep;
		ur_rip = ULONG(user_regs + OFFSET(user_regs_struct_rip));
		ur_rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp));
		if (!in_alternate_stack(bt->tc->processor, ur_rsp) &&
		    !stkptr_to_task(ur_rsp)) {
			if (CRASHDEBUG(1))
				error(INFO,
				    "x86_64_get_dumpfile_stack_frame: "
				    "ELF mismatch: RSP: %lx task: %lx\n",
					ur_rsp, bt->task);
		} else {
			if (is_kernel_text(ur_rip) &&
			    (INSTACK(ur_rsp, bt_in) ||
			     in_alternate_stack(bt->tc->processor, ur_rsp)))
				bt_in->flags |= BT_KERNEL_SPACE;
			if (!is_kernel_text(ur_rip) &&
			    in_user_stack(bt->tc->task, ur_rsp))
				bt_in->flags |= BT_USER_SPACE;
			return;
		}
	}

	if (CRASHDEBUG(1))
		error(INFO,
		    "x86_64_get_dumpfile_stack_frame: cannot find anything useful (task: %lx)\n",
			bt->task);

	if (XEN_CORE_DUMPFILE() && !panic_task && is_task_active(bt->task) &&
	    !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)))
		error(FATAL,
		    "starting backtrace locations of the active (non-crashing) "
		    "xen tasks\n cannot be determined: try -t or -T options\n");

	/* Last resort: disable the search flag and retry the normal path. */
	bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
	machdep->get_stack_frame(bt, rip, rsp);

	if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE())
		bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
}

/*
 * Get the saved RSP from the task's thread_struct.
 */
static ulong
x86_64_get_sp(struct bt_info *bt)
{
	ulong offset, rsp;

	if (tt->flags & THREAD_INFO) {
		readmem(bt->task + OFFSET(task_struct_thread) +
			OFFSET(thread_struct_rsp), KVADDR,
			&rsp, sizeof(void *),
			"thread_struct rsp", FAULT_ON_ERROR);
		return rsp;
	}

	/* Older kernels: read the value straight out of the cached stack. */
	offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp);

	return GET_STACK_ULONG(offset);
}

/*
 * Get the saved PC from the task's thread_struct if it exists;
 * otherwise just use the pre-determined thread_return value.
 */
static ulong
x86_64_get_pc(struct bt_info *bt)
{
	ulong offset, rip;

	/* Kernels without thread_struct.rip: fall back to the
	 * pre-computed thread_return address. */
	if (INVALID_MEMBER(thread_struct_rip))
		return machdep->machspec->thread_return;

	if (tt->flags & THREAD_INFO) {
		readmem(bt->task + OFFSET(task_struct_thread) +
			OFFSET(thread_struct_rip), KVADDR,
			&rip, sizeof(void *),
			"thread_struct rip", FAULT_ON_ERROR);
		if (rip)
			return rip;
		else
			return machdep->machspec->thread_return;
	}

	offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip);

	return GET_STACK_ULONG(offset);
}

/*
 * Do the work for x86_64_get_sp() and x86_64_get_pc().
 * Never implemented: unconditionally fatal if reached.
 */
static void
get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	error(FATAL, "get_x86_64_frame: TBD\n");
}

/*
 * Do the work for cmd_irq().
 * On first use, permanently redirects machdep->dump_irq to the
 * generic handler so subsequent calls bypass this wrapper.
 */
static void
x86_64_dump_irq(int irq)
{
	if (symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->dump_irq = generic_dump_irq;
		/* NOTE(review): "return (void-expr);" is a gcc extension,
		 * kept as-is. */
		return(generic_dump_irq(irq));
	}

	error(FATAL,
	    "x86_64_dump_irq: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 * Same self-redirecting pattern as x86_64_dump_irq(), for "irq -a".
 */
static void
x86_64_get_irq_affinity(int irq)
{
	if (symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->get_irq_affinity = generic_get_irq_affinity;
		return(generic_get_irq_affinity(irq));
	}

	error(FATAL,
	    "x86_64_get_irq_affinity: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 * Same self-redirecting pattern as x86_64_dump_irq(), for "irq -s".
 */
static void
x86_64_show_interrupts(int irq, ulong *cpus)
{
	if (symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->show_interrupts = generic_show_interrupts;
		return(generic_show_interrupts(irq, cpus));
	}

	error(FATAL,
	    "x86_64_show_interrupts: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 * Do the work for irq -d
 */
void
x86_64_display_idt_table(void)
{
	int i;
	char *idt_table_buf;
	char buf[BUFSIZE];
	ulong *ip;

	if (INVALID_SIZE(gate_struct)) {
		option_not_supported('d');
		return;
	}

	/* The IDT has 256 gate_struct entries; each is read as two
	 * ulongs below (ip += 2). */
	idt_table_buf = GETBUF(SIZE(gate_struct) * 256);
	readmem(symbol_value("idt_table"), KVADDR, idt_table_buf,
		SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR);
	ip = (ulong *)idt_table_buf;

	for (i = 0; i < 256; i++, ip += 2) {
		/* Pad single/double-digit indices to align the column. */
		if (i < 10)
			fprintf(fp, " ");
		else if (i < 100)
			fprintf(fp, " ");
		fprintf(fp, "[%d] %s\n",
			i, x86_64_extract_idt_function(ip, buf, NULL));
	}

	FREEBUF(idt_table_buf);
}

/*
 * Derive the per-CPU exception stack names (NMI/DEBUG/STACKFAULT/
 * DOUBLEFAULT/MCE) from the IST field of each IDT gate.
 */
static void
x86_64_exception_stacks_init(void)
{
	char *idt_table_buf;
	char buf[BUFSIZE];
	int i;
	ulong *ip, ist;
	long size;
	struct machine_specific *ms;

	ms = machdep->machspec;
	ms->stkinfo.NMI_stack_index = -1;
	for (i = 0; i < MAX_EXCEPTION_STACKS; i++)
		ms->stkinfo.exception_stacks[i] = "(unknown)";

	if (!kernel_symbol_exists("idt_table"))
		return;

	/* Fall back to a hard-wired 16-byte gate when the debuginfo
	 * has no gate_struct size. */
	if (INVALID_SIZE(gate_struct))
		size = 16;
	else
		size = SIZE(gate_struct);

	idt_table_buf = GETBUF(size * 256);
	readmem(symbol_value("idt_table"), KVADDR, idt_table_buf,
		size * 256, "idt_table", FAULT_ON_ERROR);
	ip = (ulong *)idt_table_buf;

	if (CRASHDEBUG(1))
		fprintf(fp, "exception IST:\n");

	for (i = 0; i < 256; i++, ip += 2) {
		/* IST index: bits 32-34 of the gate's first qword;
		 * 0 means "no dedicated stack". */
		ist = ((*ip) >> 32) & 0x7;
		if (ist) {
			x86_64_extract_idt_function(ip, buf, NULL);
			if (CRASHDEBUG(1))
				fprintf(fp, " %ld: %s\n", ist, buf);
			/* Classify by substring of the handler name. */
			if (strstr(buf, "nmi")) {
				ms->stkinfo.NMI_stack_index = ist-1;
				ms->stkinfo.exception_stacks[ist-1] = "NMI";
			}
			if (strstr(buf, "debug"))
				ms->stkinfo.exception_stacks[ist-1] = "DEBUG";
			if (strstr(buf, "stack"))
				ms->stkinfo.exception_stacks[ist-1] = "STACKFAULT";
			if (strstr(buf, "double"))
				ms->stkinfo.exception_stacks[ist-1] = "DOUBLEFAULT";
			if (strstr(buf, "machine"))
				ms->stkinfo.exception_stacks[ist-1] = "MCE";
		}
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "exception stacks:\n");
		for (i = 0; i < MAX_EXCEPTION_STACKS; i++)
			fprintf(fp, " [%d]: %s\n", i,
				ms->stkinfo.exception_stacks[i]);
	}

	FREEBUF(idt_table_buf);
}

/*
 * Extract the function name out of the IDT entry.
 */
static char *
x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr)
{
	ulong i1, i2, addr;
	char locbuf[BUFSIZE];
	physaddr_t phys;

	if (buf)
		BZERO(buf, BUFSIZE);

	/* Reassemble the 64-bit handler address from the three split
	 * offset fields of the 16-byte IDT gate. */
	i1 = *ip;
	i2 = *(ip+1);
	i2 <<= 32;
	addr = i2 & 0xffffffff00000000;
	addr |= (i1 & 0xffff);
	i1 >>= 32;
	addr |= (i1 & 0xffff0000);

	if (retaddr)
		*retaddr = addr;

	if (!buf)
		return NULL;

	value_to_symstr(addr, locbuf, 0);
	if (strlen(locbuf))
		sprintf(buf, "%s", locbuf);
	else {
		/* No direct symbol: show the raw address, and if it
		 * translates physically, the unity-mapped alias too. */
		sprintf(buf, "%016lx", addr);
		if (kvtop(NULL, addr, &phys, 0)) {
			addr = machdep->kvbase + (ulong)phys;
			if (value_to_symstr(addr, locbuf, 0)) {
				strcat(buf, " <");
				strcat(buf, locbuf);
				strcat(buf, ">");
			}
		}
	}

	return buf;
}

/*
 * Filter disassembly output if the output radix is not gdb's default 10
 */
static int
x86_64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;

	/*
	 * For some reason gdb can go off into the weeds translating text addresses,
	 * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the
	 * references as well as imposing the current output radix on the translations.
	 */
	console(" IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	if (colon) {
		/* Rewrite the "address:" prefix as "0xADDR <symbol>". */
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		/* Trailing "<symbol>" operand: re-resolve the hex address
		 * preceding it in the current output radix, in place. */
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && !STRNEQ(p1, " 0x"))
			p1--;

		if (!STRNEQ(p1, " 0x"))
			return FALSE;
		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));
		sprintf(p1, "%s", buf1);

	} else if (STREQ(argv[argc-2], "callq") &&
	    hexadecimal(argv[argc-1], 0)) {
		/*
		 * Update module code of the form:
		 *
		 * callq 0xffffffffa0017aa0
		 *
		 * to show a bracketed direct call target.
		 */
		p1 = &LASTCHAR(inbuf);

		if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) {
			sprintf(buf1, " <%s>\n",
				value_to_symstr(value, buf2, output_radix));
			if (IS_MODULE_VADDR(value) &&
			    !strstr(buf2, "+"))
				sprintf(p1, "%s", buf1);
		}
	}

	/* Tag the known ftrace nop padding patterns at function entry. */
	if (value_symbol(vaddr) &&
	    (strstr(inbuf, "nopl 0x0(%rax,%rax,1)") ||
	     strstr(inbuf, "data32 data32 data32 xchg %ax,%ax"))) {
		strip_line_end(inbuf);
		strcat(inbuf, " [FTRACE NOP]\n");
	}

	console("OUT: %s", inbuf);

	return TRUE;
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
x86_64_get_smp_cpus(void)
{
	int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda;
	char *cpu_pda_buf;
	ulong level4_pgt, cpu_pda_addr;
	struct syment *sp;

	/* Modern kernels: no x8664_pda, count per-cpu cpu_number values. */
	if (!VALID_STRUCT(x8664_pda)) {
		if (!(sp = per_cpu_symbol_search("per_cpu__cpu_number")) ||
		    !(kt->flags & PER_CPU_OFF))
			return 1;

		for (i = cpus = 0; i < NR_CPUS; i++) {
			if (kt->__per_cpu_offset[i] == 0)
				break;
			if (!readmem(sp->value + kt->__per_cpu_offset[i],
			    KVADDR, &cpunumber, sizeof(int),
			    "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR))
				break;
			if (cpunumber != cpus)
				break;
			cpus++;
		}

		/* Trust the cpu_present_map when it disagrees. */
		if ((i = get_cpus_present()) && (!cpus || (i < cpus)))
			cpus = get_highest_cpu_present() + 1;

		return cpus;
	}

	/* Older kernels: walk the cpu_pda/_cpu_pda array. */
	_boot_cpu_pda = FALSE;
	cpu_pda_buf = GETBUF(SIZE(x8664_pda));

	if (LKCD_KERNTYPES()) {
		if (symbol_exists("_cpu_pda"))
			_cpu_pda = TRUE;
		else
			_cpu_pda = FALSE;
		nr_pda = get_cpus_possible();
	} else {
		if (symbol_exists("_cpu_pda")) {
			if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0)))
				nr_pda = NR_CPUS;
			_cpu_pda = TRUE;
		} else {
			if (!(nr_pda = get_array_length("cpu_pda", NULL, 0)))
				nr_pda = NR_CPUS;
			_cpu_pda = FALSE;
		}
	}
	if (_cpu_pda) {
		if (symbol_exists("_boot_cpu_pda"))
			_boot_cpu_pda = TRUE;
		else
			_boot_cpu_pda = FALSE;
	}
	for (i = cpus = 0; i < nr_pda; i++) {
		if (_cpu_pda) {
			if (_boot_cpu_pda) {
				if (!_CPU_PDA_READ2(i, cpu_pda_buf))
					break;
			} else {
				if (!_CPU_PDA_READ(i, cpu_pda_buf))
					break;
			}
		} else {
			if (!CPU_PDA_READ(i, cpu_pda_buf))
				break;
		}
		/* A bogus level4_pgt marks the end of valid PDAs. */
		if (VALID_MEMBER(x8664_pda_level4_pgt)) {
			level4_pgt = ULONG(cpu_pda_buf +
				OFFSET(x8664_pda_level4_pgt));
			if (!VALID_LEVEL4_PGT_ADDR(level4_pgt))
				break;
		}
		cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber));
		if (cpunumber != cpus)
			break;
		cpus++;
	}

	FREEBUF(cpu_pda_buf);

	return cpus;
}

/*
 * Machine dependent command.
 */
void
x86_64_cmd_mach(void)
{
	int c, cflag, mflag;
	unsigned int radix;

	cflag = mflag = radix = 0;

	while ((c = getopt(argcnt, args, "cmxd")) != EOF) {
		switch(c)
		{
		case 'c':
			cflag++;
			break;
		case 'm':
			mflag++;
			x86_64_display_memmap();
			break;
		case 'x':
			if (radix == 10)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 16;
			break;
		case 'd':
			if (radix == 16)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 10;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (cflag)
		x86_64_display_cpu_data(radix);

	/* Default output when neither -c nor -m was given. */
	if (!cflag && !mflag)
		x86_64_display_machine_stats();
}

/*
 * "mach" command output.
 */
static void
x86_64_display_machine_stats(void)
{
	int i, c;
	struct new_utsname *uts;
	char buf[BUFSIZE];
	ulong mhz;

	uts = &kt->utsname;

	fprintf(fp, " MACHINE TYPE: %s\n", uts->machine);
	fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf));
	fprintf(fp, " CPUS: %d", kt->cpus);
	if (kt->cpus - get_cpus_to_display())
		fprintf(fp, " [OFFLINE: %d]\n",
			kt->cpus - get_cpus_to_display());
	else
		fprintf(fp, "\n");
	if (!STREQ(kt->hypervisor, "(undetermined)") &&
	    !STREQ(kt->hypervisor, "bare hardware"))
		fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor);
	fprintf(fp, " PROCESSOR SPEED: ");
	if ((mhz = machdep->processor_speed()))
		fprintf(fp, "%ld Mhz\n", mhz);
	else
		fprintf(fp, "(unknown)\n");
	fprintf(fp, " HZ: %d\n", machdep->hz);
	fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
//	fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size());
	fprintf(fp, " KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
	fprintf(fp, " KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
	if (machdep->flags & VMEMMAP)
		fprintf(fp, " KERNEL VMEMMAP BASE: %lx\n",
			machdep->machspec->vmemmap_vaddr);
	fprintf(fp, " KERNEL START MAP: %lx\n",
		__START_KERNEL_map);
	fprintf(fp, " KERNEL MODULES BASE: %lx\n", MODULES_VADDR);
	fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE());
	fprintf(fp, " IRQ STACK SIZE: %d\n",
		machdep->machspec->stkinfo.isize);
	fprintf(fp, " IRQ STACKS:\n");
	for (c = 0; c < kt->cpus; c++) {
		sprintf(buf, "CPU %d", c);
		fprintf(fp, "%22s: %016lx", buf,
			machdep->machspec->stkinfo.ibase[c]);
		if (hide_offline_cpu(c))
			fprintf(fp, " [OFFLINE]\n");
		else
			fprintf(fp, "\n");
	}

	/* One section per configured exception stack; CPU 0's base
	 * being zero marks the end of the configured set. */
	for (i = 0; i < MAX_EXCEPTION_STACKS; i++) {
		if (machdep->machspec->stkinfo.ebase[0][i] == 0)
			break;
		fprintf(fp, "%11s STACK SIZE: %d\n",
			machdep->machspec->stkinfo.exception_stacks[i],
			machdep->machspec->stkinfo.esize[i]);
		sprintf(buf, "%s STACKS:\n",
			machdep->machspec->stkinfo.exception_stacks[i]);
		fprintf(fp, "%24s", buf);
		for (c = 0; c < kt->cpus; c++) {
			if (machdep->machspec->stkinfo.ebase[c][i] == 0)
				break;
			sprintf(buf, "CPU %d", c);
			fprintf(fp, "%22s: %016lx", buf,
				machdep->machspec->stkinfo.ebase[c][i]);
			if (hide_offline_cpu(c))
				fprintf(fp, " [OFFLINE]\n");
			else
				fprintf(fp, "\n");
		}
	}
}

/*
 * "mach -c"
 */
static void
x86_64_display_cpu_data(unsigned int radix)
{
	int cpu, cpus, boot_cpu, _cpu_pda;
	ulong cpu_data;
	ulong cpu_pda, cpu_pda_addr;
	struct syment *per_cpu;

	boot_cpu = _cpu_pda = FALSE;
	cpu_data = cpu_pda = 0;
	cpus = 0;
	per_cpu = NULL;

	/* Locate the cpuinfo source: cpu_data[] array, per-cpu
	 * cpu_info, or boot_cpu_data alone, in that order. */
	if (symbol_exists("cpu_data")) {
		cpu_data = symbol_value("cpu_data");
		cpus = kt->cpus;
		boot_cpu = FALSE;
	} else if ((per_cpu = per_cpu_symbol_search("per_cpu__cpu_info"))) {
		cpus = kt->cpus;
		boot_cpu = FALSE;
	} else if (symbol_exists("boot_cpu_data")) {
		cpu_data = symbol_value("boot_cpu_data");
		boot_cpu = TRUE;
		cpus = 1;
	}

	if (symbol_exists("_cpu_pda")) {
		cpu_pda = symbol_value("_cpu_pda");
		_cpu_pda = TRUE;
	} else if (symbol_exists("cpu_pda")) {
		cpu_pda = symbol_value("cpu_pda");
		_cpu_pda = FALSE;
	}

	for (cpu = 0; cpu < cpus; cpu++) {
		if (boot_cpu)
			fprintf(fp, "BOOT CPU:\n");
		else {
			if (hide_offline_cpu(cpu)) {
				fprintf(fp, "%sCPU %d: [OFFLINE]\n",
					cpu ? "\n" : "", cpu);
				continue;
			} else
				fprintf(fp, "%sCPU %d:\n",
					cpu ? "\n" : "", cpu);
		}

		if (per_cpu)
			cpu_data = per_cpu->value + kt->__per_cpu_offset[cpu];

		dump_struct("cpuinfo_x86", cpu_data, radix);

		/* _cpu_pda is an array of pointers; cpu_pda is an
		 * array of structures — advance accordingly. */
		if (_cpu_pda) {
			readmem(cpu_pda, KVADDR, &cpu_pda_addr,
				sizeof(unsigned long), "_cpu_pda addr",
				FAULT_ON_ERROR);
			fprintf(fp, "\n");
			dump_struct("x8664_pda", cpu_pda_addr, radix);
			cpu_pda += sizeof(void *);
		} else if (VALID_STRUCT(x8664_pda)) {
			fprintf(fp, "\n");
			dump_struct("x8664_pda", cpu_pda, radix);
			cpu_pda += SIZE(x8664_pda);
		}

		if (!per_cpu)
			cpu_data += SIZE(cpuinfo_x86);
	}
}

/*
 * "mach -m"
 */
static char *e820type[] = {
	"(invalid type)",
	"E820_RAM",
	"E820_RESERVED",
	"E820_ACPI",
	"E820_NVS",
	"E820_UNUSABLE",
};

static void
x86_64_display_memmap(void)
{
	ulong e820;
	int nr_map, i;
	char *buf, *e820entry_ptr;
	ulonglong addr, size;
	uint type;

	e820 = symbol_value("e820");
	if (CRASHDEBUG(1))
		dump_struct("e820map", e820, RADIX(16));
	buf = (char *)GETBUF(SIZE(e820map));
	readmem(e820, KVADDR, &buf[0], SIZE(e820map),
		"e820map", FAULT_ON_ERROR);
	nr_map = INT(buf + OFFSET(e820map_nr_map));

	fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n");

	for (i = 0; i < nr_map; i++) {
		/* Entries start after the leading nr_map int. */
		e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i);
		addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr));
		size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size));
		type = UINT(e820entry_ptr + OFFSET(e820entry_type));
		fprintf(fp, "%016llx - %016llx ", addr, addr+size);
		if (type >= (sizeof(e820type)/sizeof(char *)))
			fprintf(fp, "type %d\n", type);
		else
			fprintf(fp, "%s\n", e820type[type]);
	}
}

/* Source files used by the (currently stubbed) line-number hooks. */
static const char *hook_files[] = {
	"arch/x86_64/kernel/entry.S",
	"arch/x86_64/kernel/head.S",
	"arch/x86_64/kernel/semaphore.c"
};

#define ENTRY_S      ((char **)&hook_files[0])
#define HEAD_S       ((char **)&hook_files[1])
#define SEMAPHORE_C  ((char **)&hook_files[2])

static struct line_number_hook x86_64_line_number_hooks[] = {
	{"ret_from_fork", ENTRY_S},
	{"system_call", ENTRY_S},
	{"int_ret_from_sys_call", ENTRY_S},
	{"ptregscall_common", ENTRY_S},
	{"stub_execve", ENTRY_S},
	{"stub_rt_sigreturn", ENTRY_S},
	{"common_interrupt", ENTRY_S},
	{"ret_from_intr", ENTRY_S},
	{"load_gs_index", ENTRY_S},
	{"arch_kernel_thread", ENTRY_S},
	{"execve", ENTRY_S},
	{"page_fault", ENTRY_S},
	{"coprocessor_error", ENTRY_S},
	{"simd_coprocessor_error", ENTRY_S},
	{"device_not_available", ENTRY_S},
	{"debug", ENTRY_S},
	{"nmi", ENTRY_S},
	{"int3", ENTRY_S},
	{"overflow", ENTRY_S},
	{"bounds", ENTRY_S},
	{"invalid_op", ENTRY_S},
	{"coprocessor_segment_overrun", ENTRY_S},
	{"reserved", ENTRY_S},
	{"double_fault", ENTRY_S},
	{"invalid_TSS", ENTRY_S},
	{"segment_not_present", ENTRY_S},
	{"stack_segment", ENTRY_S},
	{"general_protection", ENTRY_S},
	{"alignment_check", ENTRY_S},
	{"divide_error", ENTRY_S},
	{"spurious_interrupt_bug", ENTRY_S},
	{"machine_check", ENTRY_S},
	{"call_debug", ENTRY_S},
	{NULL, NULL}    /* list must be NULL-terminated */
};

/* Never implemented: unconditionally fatal if reached. */
static void
x86_64_dump_line_number(ulong callpc)
{
	error(FATAL, "x86_64_dump_line_number: TBD\n");
}

/*
 * References otherwise-unused statics/macros so the compiler does
 * not warn about them; never meant to be executed usefully.
 */
void
x86_64_compiler_warning_stub(void)
{
	struct line_number_hook *lhp;
	char **p ATTRIBUTE_UNUSED;

	lhp = &x86_64_line_number_hooks[0];
	lhp++;
	p = ENTRY_S;
	x86_64_back_trace(NULL, NULL);
	get_x86_64_frame(NULL, NULL, NULL);
	x86_64_dump_line_number(0);
}

/*
 * Force the VM address-range selection via:
 *
 *  --machdep vm=orig
 *  --machdep vm=2.6.11
 *
 * Force the phys_base address via:
 *
 *  --machdep phys_base=
 *
 * Force the IRQ stack back-link via:
 *
 *  --machdep irq_eframe_link=
 *
 * Force max_physmem_bits via:
 *
 *  --machdep max_physmem_bits=
 */
void
parse_cmdline_args(void)
{
	int index, i, c, errflag;
	char *p;
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int megabytes;
	int lines = 0;
	int vm_flag;
	ulong value;

	for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
		if (!machdep->cmdline_args[index])
			break;

		if (!strstr(machdep->cmdline_args[index], "=")) {
			error(WARNING, "ignoring --machdep option: %s\n\n",
				machdep->cmdline_args[index]);
			continue;
		}

		strcpy(buf, machdep->cmdline_args[index]);

		/* Split comma-separated option list into words. */
		for (p = buf; *p; p++) {
			if (*p == ',')
				*p = ' ';
		}

		c = parse_line(buf, arglist);

		for (i = vm_flag = 0; i < c; i++) {
			errflag = 0;

			if (STRNEQ(arglist[i], "vm=")) {
				vm_flag++;
				p = arglist[i] + strlen("vm=");
				if (strlen(p)) {
					if (STREQ(p, "orig")) {
						machdep->flags |= VM_ORIG;
						continue;
					} else if (STREQ(p, "2.6.11")) {
						machdep->flags |= VM_2_6_11;
						continue;
					} else if (STREQ(p, "xen")) {
						machdep->flags |= VM_XEN;
						continue;
					} else if (STREQ(p, "xen-rhel4")) {
						machdep->flags |= VM_XEN_RHEL4;
						continue;
					}
				}
			} else if (STRNEQ(arglist[i], "phys_base=")) {
				/* Accept a trailing m/M meaning megabytes
				 * (decimal); otherwise hex bytes. */
				megabytes = FALSE;
				if ((LASTCHAR(arglist[i]) == 'm') ||
				    (LASTCHAR(arglist[i]) == 'M')) {
					LASTCHAR(arglist[i]) = NULLCHAR;
					megabytes = TRUE;
				}
				p = arglist[i] + strlen("phys_base=");
				if (strlen(p)) {
					if (megabytes) {
						value = dtol(p,
						    RETURN_ON_ERROR|QUIET,
						    &errflag);
					} else
						value = htol(p,
						    RETURN_ON_ERROR|QUIET,
						    &errflag);
					if (!errflag) {
						if (megabytes)
							value = MEGABYTES(value);
						machdep->machspec->phys_base = value;
						error(NOTE,
						    "setting phys_base to: 0x%lx\n\n",
						    machdep->machspec->phys_base);
						machdep->flags |= PHYS_BASE;
						continue;
					}
				}
			} else if (STRNEQ(arglist[i], "irq_eframe_link=")) {
				p = arglist[i] + strlen("irq_eframe_link=");
				if (strlen(p)) {
					value = stol(p,
					    RETURN_ON_ERROR|QUIET, &errflag);
					if (!errflag) {
						machdep->machspec->irq_eframe_link = value;
						continue;
					}
				}
			} else if (STRNEQ(arglist[i], "max_physmem_bits=")) {
				p = arglist[i] + strlen("max_physmem_bits=");
				if (strlen(p)) {
					value = stol(p,
					    RETURN_ON_ERROR|QUIET, &errflag);
					if (!errflag) {
						machdep->max_physmem_bits = value;
						error(NOTE,
						    "setting max_physmem_bits to: %ld\n\n",
						    machdep->max_physmem_bits);
						continue;
					}
				}
			}

			/* Any option that fell through is unrecognized. */
			error(WARNING, "ignoring --machdep option: %s\n",
				arglist[i]);
			lines++;
		}

		if (vm_flag) {
			switch (machdep->flags & VM_FLAGS)
			{
			case 0:
				break;
			case VM_ORIG:
				error(NOTE,
				    "using original x86_64 VM address ranges\n");
				lines++;
				break;
			case VM_2_6_11:
				error(NOTE,
				    "using 2.6.11 x86_64 VM address ranges\n");
				lines++;
				break;
			case VM_XEN:
				error(NOTE,
				    "using xen x86_64 VM address ranges\n");
				lines++;
				break;
			case VM_XEN_RHEL4:
				error(NOTE,
				    "using RHEL4 xen x86_64 VM address ranges\n");
				lines++;
				break;
			default:
				error(WARNING,
				    "cannot set multiple vm values\n");
				lines++;
				machdep->flags &= ~VM_FLAGS;
				break;
			}
		}

		if (lines)
			fprintf(fp, "\n");
	}
}

/* Invalidate the cached upper-PML4 page read. */
void
x86_64_clear_machdep_cache(void)
{
	machdep->machspec->last_upml_read = 0;
}

/* Little-endian encoding of "push %rbp; mov %rsp,%rbp". */
#define PUSH_RBP_MOV_RSP_RBP 0xe5894855

/*
 * Decide whether the kernel was built with frame pointers by sampling
 * the entry bytes of well-known functions; three or more hits set
 * the FRAMEPOINTER machdep flag.
 */
static void
x86_64_framepointer_init(void)
{
	unsigned int push_rbp_mov_rsp_rbp;
	int i, check;
	/* NOTE(review): the loop bound 9 below is hard-coded to this
	 * array's length — keep them in sync. */
	char *checkfuncs[] = {
		"sys_open", "sys_fork", "sys_read",
		"do_futex", "do_fork", "_do_fork",
		"sys_write", "vfs_read", "__schedule"
	};

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	for (i = check = 0; i < 9; i++) {
		if (!kernel_symbol_exists(checkfuncs[i]))
			continue;
		if (!readmem(symbol_value(checkfuncs[i]), KVADDR,
		    &push_rbp_mov_rsp_rbp, sizeof(uint),
		    "framepointer check", RETURN_ON_ERROR))
			return;
		/* Skip over known 5-byte padding/ftrace prologues and
		 * re-sample past them. */
		if ((push_rbp_mov_rsp_rbp == 0x66666666) ||
		    (push_rbp_mov_rsp_rbp == 0x00441f0f)) {
			if (!readmem(symbol_value(checkfuncs[i]) + 5,
			    KVADDR, &push_rbp_mov_rsp_rbp, sizeof(uint),
			    "framepointer check", RETURN_ON_ERROR))
				return;
		}
		if (push_rbp_mov_rsp_rbp == PUSH_RBP_MOV_RSP_RBP) {
			if (++check > 2) {
				machdep->flags |= FRAMEPOINTER;
				break;
			}
		}
	}
}

/*
 * Disassemble [start, end) via gdb and return the address of the line
 * containing a call to __switch_to (or referencing its symbol);
 * returns 0 when not found.
 */
static ulong
search_for_switch_to(ulong start, ulong end)
{
	ulong max_instructions, address;
	char buf1[BUFSIZE];
	char search_string1[BUFSIZE];
	char search_string2[BUFSIZE];
	int found;

	max_instructions = end - start;
	found = FALSE;
	sprintf(buf1, "x/%ldi 0x%lx", max_instructions, start);

	if (symbol_exists("__switch_to")) {
		sprintf(search_string1, "callq 0x%lx",
			symbol_value("__switch_to"));
		sprintf(search_string2, "call 0x%lx",
			symbol_value("__switch_to"));
	} else {
		search_string1[0] = NULLCHAR;
		search_string2[0] = NULLCHAR;
	}

	open_tmpfile();

	/* NOTE(review): returns FALSE (0) here from a ulong-returning
	 * function — harmless since 0 is the not-found value, but the
	 * tmpfile is left open on this early-error path. */
	if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR))
		return FALSE;

	rewind(pc->tmpfile);
	while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
		/* buf1 holds the line AFTER the match when the loop
		 * exits — that is the address we want. */
		if (found)
			break;
		if (strstr(buf1, "<__switch_to>"))
			found = TRUE;
		if (strlen(search_string1) &&
		    strstr(buf1, search_string1))
			found = TRUE;
		if (strlen(search_string2) &&
		    strstr(buf1, search_string2))
			found = TRUE;
	}
	close_tmpfile();

	if (found && extract_hex(buf1, &address, ':', TRUE))
		return address;

	return 0;
}

/*
 * Determine the "thread_return" address used as the resume PC for
 * inactive tasks: use the symbol directly if present, otherwise find
 * the instruction after the __switch_to call inside (__)schedule.
 */
static void
x86_64_thread_return_init(void)
{
	struct syment *sp, *spn;
	ulong address;

	if ((sp = kernel_symbol_search("thread_return"))) {
		machdep->machspec->thread_return = sp->value;
		return;
	}

	if ((sp = kernel_symbol_search("schedule")) &&
	    (spn = next_symbol(NULL, sp)) &&
	    (address = search_for_switch_to(sp->value, spn->value))) {
		machdep->machspec->thread_return = address;
		return;
	}

	if ((sp = kernel_symbol_search("__schedule")) &&
	    (spn = next_symbol(NULL, sp)) &&
	    (address = search_for_switch_to(sp->value, spn->value))) {
		machdep->machspec->thread_return = address;
		return;
	}

	error(INFO, "cannot determine thread return address\n");
	machdep->machspec->thread_return =
		(sp = kernel_symbol_search("schedule")) ?
sp->value : 0; } static void x86_64_irq_eframe_link_init(void) { int c; struct syment *sp, *spn; char buf[BUFSIZE]; char link_register[BUFSIZE]; char *arglist[MAXARGS]; ulong max_instructions; if (machdep->machspec->irq_eframe_link == UNINITIALIZED) machdep->machspec->irq_eframe_link = 0; else return; if (THIS_KERNEL_VERSION < LINUX(2,6,9)) return; if (!(sp = symbol_search("common_interrupt")) || !(spn = next_symbol(NULL, sp))) { return; } max_instructions = spn->value - sp->value; open_tmpfile(); sprintf(buf, "x/%ldi 0x%lx", max_instructions, sp->value); if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) return; link_register[0] = NULLCHAR; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (!strstr(buf, sp->name)) break; if ((c = parse_line(buf, arglist)) < 4) continue; if (strstr(arglist[2], "push")) strcpy(link_register, arglist[3]); } close_tmpfile(); if (CRASHDEBUG(1)) fprintf(fp, "IRQ stack link register: %s\n", strlen(link_register) ? link_register : "undetermined"); if (STREQ(link_register, "%rbp")) machdep->machspec->irq_eframe_link = 40; else if (THIS_KERNEL_VERSION >= LINUX(2,6,29)) machdep->machspec->irq_eframe_link = 40; } /* * Calculate and verify the IRQ exception frame location from the * stack reference at the top of the IRQ stack, possibly adjusting * the ms->irq_eframe_link value. */ static ulong x86_64_irq_eframe_link(ulong stkref, struct bt_info *bt, FILE *ofp) { ulong irq_eframe; irq_eframe = stkref - machdep->machspec->irq_eframe_link; if (x86_64_exception_frame(EFRAME_VERIFY, irq_eframe, 0, bt, ofp)) return irq_eframe; if (x86_64_exception_frame(EFRAME_VERIFY, irq_eframe+8, 0, bt, ofp)) { machdep->machspec->irq_eframe_link -= 8; return (irq_eframe + 8); } return irq_eframe; } #include "netdump.h" #include "xen_dom0.h" /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. 
 */

#define MAX_X86_64_FRAMES  (512)
#define MFNS_PER_FRAME     (PAGESIZE()/sizeof(ulong))

/*
 *  Build xkd->p2m_mfn_frame_list, the index of mfns backing the dom0
 *  kernel's phys_to_machine_mapping[] array, either from the p2m_mfn
 *  list page passed in the kdump header or (KDUMP_CR3) by walking the
 *  guest page tables starting at cr3.  Returns TRUE/FALSE.
 */
static int
x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd)
{
	int i, j;
	ulong kvaddr;
	ulong *up;
	ulong frames;
	ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 };
	int mfns[MAX_X86_64_FRAMES] = { 0 };
	struct syment *sp;

	/*
	 *  Temporarily read physical (machine) addresses from vmcore.
	 */
	pc->curcmd_flags |= XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n");

	if (xkd->flags & KDUMP_CR3)
		goto use_cr3;

	if (CRASHDEBUG(1))
		fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n",
			xkd->p2m_mfn);

	if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(),
	    "xen kdump p2m mfn page", RETURN_ON_ERROR))
		error(FATAL, "cannot read xen kdump p2m mfn page\n");

	if (CRASHDEBUG(2))
		x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list");

	/* First page holds the mfns of the frame-list pages themselves. */
	for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_64_FRAMES; i++, up++)
		frame_mfn[i] = *up;

	/* Count the non-zero (populated) entries in each frame-list page. */
	for (i = 0; i < MAX_X86_64_FRAMES; i++) {
		if (!frame_mfn[i])
			break;

		if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page,
		    PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR))
			error(FATAL, "cannot read xen kdump p2m mfn list page\n");

		for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++)
			if (*up)
				mfns[i]++;

		xkd->p2m_frames += mfns[i];

		if (CRASHDEBUG(7))
			x86_64_debug_dump_page(fp, xkd->page,
				"pfn_to_mfn_frame_list page");
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames);

	if ((xkd->p2m_mfn_frame_list = (ulong *)
	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	/* Gather the populated entries into one contiguous array. */
	for (i = 0, frames = xkd->p2m_frames; frames; i++) {
		if (!readmem(PTOB(frame_mfn[i]), PHYSADDR,
		    &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME],
		    mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page",
		    RETURN_ON_ERROR))
			error(FATAL, "cannot read xen kdump p2m mfn list page\n");

		frames -= mfns[i];
	}

	if (CRASHDEBUG(2)) {
		for (i = 0; i < xkd->p2m_frames; i++)
			fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
		fprintf(fp, "\n");
	}

	pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (restore): p2m translation\n");

	return TRUE;

use_cr3:

	if (CRASHDEBUG(1))
		fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3);

	if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->machspec->pml4,
	    PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR))
		error(FATAL, "cannot read xen kdump cr3 page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(fp, machdep->machspec->pml4,
			"contents of PML4 page:");

	/*
	 * kernel version < 2.6.27 => end_pfn
	 * kernel version >= 2.6.27 => max_pfn
	 */
	if ((sp = symbol_search("end_pfn")))
		kvaddr = sp->value;
	else
		kvaddr = symbol_value("max_pfn");

	if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page))
		return FALSE;
	up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));

	/* Number of p2m frames = ceil(max_pfn / mfns-per-page). */
	xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
		((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	if (CRASHDEBUG(1))
		fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n",
			kvaddr, *up, *up, xkd->p2m_frames);

	if ((xkd->p2m_mfn_frame_list = (ulong *)
	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	kvaddr = symbol_value("phys_to_machine_mapping");
	if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page))
		return FALSE;
	up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
	kvaddr = *up;
	if (CRASHDEBUG(1))
		fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr);

	/* Invalidate the page-table caches before the walk below. */
	machdep->last_pgd_read = BADADDR;
	machdep->last_pmd_read = BADADDR;
	machdep->last_ptbl_read = BADADDR;

	for (i = 0; i < xkd->p2m_frames; i++) {
		xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr);
		kvaddr += PAGESIZE();
	}

	if (CRASHDEBUG(1)) {
		for (i = 0; i < xkd->p2m_frames; i++)
			fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
		fprintf(fp, "\n");
	}

	machdep->last_pgd_read = 0;
	machdep->last_ptbl_read = 0;
	machdep->last_pmd_read = 0;
	pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (restore): p2m translation\n");

	return TRUE;
}

/*
 *  Walk the guest page tables (pml4 -> pgd -> pmd -> pte) for kvaddr and
 *  read the referenced machine page into pgbuf.  Each level's page is
 *  cached in machdep->{pgd,pmd,ptbl} with the mfn recorded in the
 *  corresponding last_*_read field.
 */
static char *
x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf)
{
	ulong mfn;
	ulong *pml4, *pgd, *pmd, *ptep;

	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(fp, "[%lx] pml4: %lx  mfn: %lx  pml4_index: %lx\n",
			kvaddr, *pml4, mfn, pml4_index(kvaddr));

	if (!readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(),
	    "xen kdump pud page", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find pud page\n");
	machdep->last_pgd_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(fp, machdep->pgd,
			"contents of page upper directory page:");

	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(fp, "[%lx] pgd: %lx  mfn: %lx  pgd_index: %lx\n",
			kvaddr, *pgd, mfn, pgd_index(kvaddr));

	if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(),
	    "xen kdump pmd page", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(fp, machdep->pmd,
			"contents of page middle directory page:");

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(fp, "[%lx] pmd: %lx  mfn: %lx  pmd_index: %lx\n",
			kvaddr, *pmd, mfn, pmd_index(kvaddr));

	if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
	    "xen kdump page table page", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(fp, machdep->ptbl,
			"contents of page table page:");

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(fp, "[%lx] ptep: %lx  mfn: %lx  pte_index: %lx\n",
			kvaddr, *ptep, mfn, pte_index(kvaddr));

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump page table page", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find pte page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(fp, pgbuf,
			"contents of page:");

	return pgbuf;
}

/*
 *  Same 4-level walk as above, but only returns the final mfn for
 *  kvaddr, re-reading intermediate pages only on cache misses.
 */
static ulong
x86_64_xen_kdump_page_mfn(ulong kvaddr)
{
	ulong mfn;
	ulong *pml4, *pgd, *pmd, *ptep;

	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pgd_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->pgd, PAGESIZE(),
	    "xen kdump pud entry", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find pud page\n");
	machdep->last_pgd_read = mfn;

	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pmd_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(),
	    "xen kdump pmd entry", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_ptbl_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
	    "xen kdump page table page", RETURN_ON_ERROR))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	return mfn;
}

#include "xendump.h"

/*
 *  Determine the physical address base for relocatable kernels.
 */
static void
x86_64_calc_phys_base(void)
{
	int i;
	FILE *iomem;
	char buf[BUFSIZE];
	char *p1;
	ulong phys_base, text_start, kernel_code_start;
	int errflag;
	struct vmcore_data *vd;
	static struct xendump_data *xd;
	Elf64_Phdr *phdr;

	if (machdep->flags & PHYS_BASE)	/* --machdep override */
		return;

	machdep->machspec->phys_base = 0;	/* default/traditional */

	if (pc->flags2 & GET_LOG)
		text_start = BADADDR;
	else {
		/* Non-relocatable kernels have no "phys_base" symbol. */
		if (!kernel_symbol_exists("phys_base"))
			return;
		if (!symbol_exists("_text"))
			return;
		else
			text_start = symbol_value("_text");

		if (REMOTE()) {
			phys_base = get_remote_phys_base(text_start,
				symbol_value("phys_base"));
			if (phys_base) {
				machdep->machspec->phys_base = phys_base;
				if (CRASHDEBUG(1)) {
					fprintf(fp, "_text: %lx ", text_start);
					fprintf(fp, "phys_base: %lx\n\n",
						machdep->machspec->phys_base);
				}
				return;
			}
		}
	}

	if (ACTIVE()) {
		/* Live system: derive phys_base from /proc/iomem. */
		if ((iomem = fopen("/proc/iomem", "r")) == NULL)
			return;

		errflag = 1;
		while (fgets(buf, BUFSIZE, iomem)) {
			if (strstr(buf, ": Kernel code")) {
				clean_line(buf);
				errflag = 0;
				break;
			}
		}
		fclose(iomem);

		if (errflag)
			return;

		if (!(p1 = strstr(buf, "-")))
			return;
		else
			*p1 = NULLCHAR;

		errflag = 0;
		kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag);
		if (errflag)
			return;

		machdep->machspec->phys_base = kernel_code_start -
			(text_start - __START_KERNEL_map);

		if (CRASHDEBUG(1)) {
			fprintf(fp, "_text: %lx ", text_start);
			fprintf(fp, "Kernel code: %lx -> ", kernel_code_start);
			fprintf(fp, "phys_base: %lx\n\n",
				machdep->machspec->phys_base);
		}

		return;
	}

	/*
	 *  Get relocation value from whatever dumpfile format is being used.
	 */
	if (DISKDUMP_DUMPFILE()) {
		if (diskdump_phys_base(&phys_base)) {
			machdep->machspec->phys_base = phys_base;
			if ((pc->flags2 & QEMU_MEM_DUMP_COMPRESSED) &&
			    !x86_64_virt_phys_base())
				error(WARNING,
				    "cannot determine physical base address:"
				    " defaulting to %lx\n\n",
					machdep->machspec->phys_base);
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "compressed kdump: phys_base: %lx\n",
					phys_base);
		}
		return;
	}

	if (KVMDUMP_DUMPFILE()) {
		if (kvmdump_phys_base(&phys_base)) {
			machdep->machspec->phys_base = phys_base;
			if (CRASHDEBUG(1))
				fprintf(fp, "kvmdump: phys_base: %lx\n",
					phys_base);
		} else {
			/* fall back to the linux_banner probe */
			machdep->machspec->phys_base = phys_base;
			if (!x86_64_virt_phys_base())
				error(WARNING,
				    "cannot determine physical base address:"
				    " defaulting to %lx\n\n", phys_base);
		}
		return;
	}

	if (SADUMP_DUMPFILE()) {
		if (sadump_phys_base(&phys_base)) {
			machdep->machspec->phys_base = phys_base;
			if (CRASHDEBUG(1))
				fprintf(fp, "sadump: phys_base: %lx\n",
					phys_base);
		} else {
			machdep->machspec->phys_base = phys_base;
			if (!x86_64_virt_phys_base())
				error(WARNING,
				    "cannot determine physical base address:"
				    " defaulting to %lx\n\n", phys_base);
		}
		return;
	}

	if ((vd = get_kdump_vmcore_data())) {
		/*
		 *  Use the first PT_LOAD segment mapped into the kernel
		 *  text region to compute paddr - (vaddr - base).
		 */
		for (i = 0; i < vd->num_pt_load_segments; i++) {
			phdr = vd->load64 + i;
			if ((phdr->p_vaddr >= __START_KERNEL_map) &&
			    !(IS_VMALLOC_ADDR(phdr->p_vaddr))) {
				machdep->machspec->phys_base = phdr->p_paddr -
				    (phdr->p_vaddr & ~(__START_KERNEL_map));
				if (CRASHDEBUG(1)) {
					fprintf(fp,
					    "p_vaddr: %lx p_paddr: %lx -> ",
						phdr->p_vaddr, phdr->p_paddr);
					fprintf(fp, "phys_base: %lx\n\n",
						machdep->machspec->phys_base);
				}
				break;
			}
		}
		if ((pc->flags2 & QEMU_MEM_DUMP_ELF) &&
		    !x86_64_virt_phys_base())
			error(WARNING,
			    "cannot determine physical base address:"
			    " defaulting to %lx\n\n",
				machdep->machspec->phys_base);
		return;
	}

	if ((xd = get_xendump_data())) {
		if (text_start == __START_KERNEL_map) {
			/*
			 * Xen kernels are not relocable (yet) and don't have
			 * the "phys_base" entry point, so this is most likely
			 * a xendump of a fully-virtualized relocatable kernel.
			 * No clues exist in the xendump header, so hardwire
			 * phys_base to 2MB and hope for the best.
			 */
			machdep->machspec->phys_base = 0x200000;
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "default relocatable phys_base: %lx\n",
					machdep->machspec->phys_base);

		} else if (text_start > __START_KERNEL_map) {
			switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M))
			{
			/*
			 * If this is a new ELF-style xendump with no
			 * p2m information, then it also must be a
			 * fully-virtualized relocatable kernel.  Again,
			 * the xendump header is useless, and we don't
			 * have /proc/iomem, so presume that the kernel
			 * code starts at 2MB.
			 */
			case (XC_CORE_ELF|XC_CORE_NO_P2M):
				machdep->machspec->phys_base = 0x200000 -
					(text_start - __START_KERNEL_map);
				if (CRASHDEBUG(1))
					fprintf(fp, "default relocatable "
					    "phys_base: %lx\n",
						machdep->machspec->phys_base);
				break;

			default:
				break;
			}
		}
		if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM)
			x86_64_virt_phys_base();
	}
}

/*
 *  Verify, or possibly override, the xendump/kvmdump phys_base
 *  calculation by trying to read linux_banner from a range of
 *  typical physical offsets.
 */
static int
x86_64_virt_phys_base(void)
{
	char buf[BUFSIZE];
	struct syment *sp;
	ulong phys, linux_banner_phys;

	if (!(sp = symbol_search("linux_banner")) ||
	    !((sp->type == 'R') || (sp->type == 'r')))
		return FALSE;

	linux_banner_phys = sp->value - __START_KERNEL_map;

	/* First try the currently-calculated phys_base. */
	if (readmem(linux_banner_phys + machdep->machspec->phys_base,
	    PHYSADDR, buf, strlen("Linux version"), "linux_banner verify",
	    QUIET|RETURN_ON_ERROR) && STRNEQ(buf, "Linux version"))
		return TRUE;

	/* Probe offsets from -16MB to +16MB in 1MB steps. */
	for (phys = (ulong)(-MEGABYTES(16)); phys != MEGABYTES(16+1);
	     phys += MEGABYTES(1)) {
		if (readmem(linux_banner_phys + phys, PHYSADDR, buf,
		    strlen("Linux version"), "linux_banner search",
		    QUIET|RETURN_ON_ERROR) && STRNEQ(buf, "Linux version")) {
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "virtual dump phys_base: %lx %s\n", phys,
				    machdep->machspec->phys_base != phys ?
				    "override" : "");
			machdep->machspec->phys_base = phys;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 *  Create an index of mfns for each page that makes up the
 *  kernel's complete phys_to_machine_mapping[max_pfn] array.
 */
static int
x86_64_xendump_p2m_create(struct xendump_data *xd)
{
	int i, idx;
	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
	ulong *up;
	off_t offset;
	struct syment *sp;

	/*
	 *  Check for pvops Xen kernel before presuming it's HVM.
	 */
	if (symbol_exists("pv_init_ops") && symbol_exists("xen_patch") &&
	    (xd->xc_core.header.xch_magic == XC_CORE_MAGIC))
		return x86_64_pvops_xendump_p2m_create(xd);

	if (!symbol_exists("phys_to_machine_mapping")) {
		xd->flags |= XC_CORE_NO_P2M;
		return TRUE;
	}

	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg"))
	    == INVALID_OFFSET)
		error(FATAL,
		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
	else if (CRASHDEBUG(1))
		fprintf(xd->ofp,
		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
			ctrlreg_offset);

	offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset;

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to xch_ctxt_offset\n");

	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg))
		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");

	for (i = 0; CRASHDEBUG(1) && (i < 8); i++)
		fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]);

	/* ctrlreg[3] is cr3: the guest's top-level page table mfn. */
	mfn = ctrlreg[3] >> PAGESHIFT();

	if (!xc_core_mfn_to_page(mfn, machdep->machspec->pml4))
		error(FATAL, "cannot read/find cr3 page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->machspec->pml4,
			"contents of PML4 page:");

	/*
	 * kernel version < 2.6.27 => end_pfn
	 * kernel version >= 2.6.27 => max_pfn
	 */
	if ((sp = symbol_search("end_pfn")))
		kvaddr = sp->value;
	else
		kvaddr = symbol_value("max_pfn");

	if (!x86_64_xendump_load_page(kvaddr, xd))
		return FALSE;

	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		fprintf(xd->ofp, "end pfn: %lx\n", *up);

	/* Number of p2m frames = ceil(max_pfn / pfns-per-page). */
	xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
		((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
	    malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_list");

	kvaddr = symbol_value("phys_to_machine_mapping");
	if (!x86_64_xendump_load_page(kvaddr, xd))
		return FALSE;

	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		fprintf(fp, "phys_to_machine_mapping: %lx\n", *up);

	kvaddr = *up;
	machdep->last_ptbl_read = BADADDR;

	for (i = 0; i < xd->xc_core.p2m_frames; i++) {
		if ((idx = x86_64_xendump_page_index(kvaddr, xd)) ==
		    MFN_NOT_FOUND)
			return FALSE;
		xd->xc_core.p2m_frame_index_list[i] = idx;
		kvaddr += PAGESIZE();
	}

	machdep->last_ptbl_read = 0;

	return TRUE;
}

/*
 *  p2m creation for paravirt-ops kernels: read cr3, size the p2m
 *  array, then dispatch to the 2-level or 3-level p2m tree layout.
 */
static int
x86_64_pvops_xendump_p2m_create(struct xendump_data *xd)
{
	int i;
	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
	ulong *up;
	off_t offset;
	struct syment *sp;

	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg"))
	    == INVALID_OFFSET)
		error(FATAL,
		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
	else if (CRASHDEBUG(1))
		fprintf(xd->ofp,
		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
			ctrlreg_offset);

	offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset;

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to xch_ctxt_offset\n");

	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg))
		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");

	for (i = 0; CRASHDEBUG(1) && (i < 8); i++)
		fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]);

	mfn = ctrlreg[3] >> PAGESHIFT();

	if (!xc_core_mfn_to_page(mfn, machdep->machspec->pml4))
		error(FATAL, "cannot read/find cr3 page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->machspec->pml4,
			"contents of PML4 page:");

	/*
	 * kernel version < 2.6.27 => end_pfn
	 * kernel version >= 2.6.27 => max_pfn
	 */
	if ((sp = symbol_search("end_pfn")))
		kvaddr = sp->value;
	else
		kvaddr = symbol_value("max_pfn");

	if (!x86_64_xendump_load_page(kvaddr, xd))
		return FALSE;

	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		fprintf(xd->ofp, "end pfn: %lx\n", *up);

	xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
		((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
	    malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_list");

	/* p2m_mid_missing exists only in the 3-level p2m layout. */
	if (symbol_exists("p2m_mid_missing"))
		return x86_64_pvops_xendump_p2m_l3_create(xd);
	else
		return x86_64_pvops_xendump_p2m_l2_create(xd);
}

/*
 *  Populate p2m_frame_index_list from the flat (2-level) p2m_top array.
 */
static int
x86_64_pvops_xendump_p2m_l2_create(struct xendump_data *xd)
{
	int i, idx, p;
	ulong kvaddr, *up;

	machdep->last_ptbl_read = BADADDR;

	kvaddr = symbol_value("p2m_top");

	for (p = 0; p < xd->xc_core.p2m_frames; p += XEN_PFNS_PER_PAGE) {
		if (!x86_64_xendump_load_page(kvaddr, xd))
			return FALSE;

		if (CRASHDEBUG(7))
			x86_64_debug_dump_page(xd->ofp, xd->page,
				"contents of page:");

		up = (ulong *)(xd->page);

		for (i = 0; i < XEN_PFNS_PER_PAGE; i++, up++) {
			if ((p+i) >= xd->xc_core.p2m_frames)
				break;
			if ((idx = x86_64_xendump_page_index(*up, xd)) ==
			    MFN_NOT_FOUND)
				return FALSE;
			xd->xc_core.p2m_frame_index_list[p+i] = idx;
		}

		kvaddr += PAGESIZE();
	}

	machdep->last_ptbl_read = 0;

	return TRUE;
}

/*
 *  Populate p2m_frame_index_list from the 3-level p2m tree
 *  (p2m_top -> p2m_mid -> p2m leaf pages), skipping the shared
 *  "missing" placeholder pages.
 */
static int
x86_64_pvops_xendump_p2m_l3_create(struct xendump_data *xd)
{
	int i, idx, j, p2m_frame, ret = FALSE;
	ulong kvaddr, *p2m_mid, p2m_mid_missing, p2m_missing, *p2m_top;

	p2m_top = NULL;
	machdep->last_ptbl_read = BADADDR;

	kvaddr = symbol_value("p2m_missing");
	if (!x86_64_xendump_load_page(kvaddr, xd))
		goto err;

	p2m_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));

	kvaddr = symbol_value("p2m_mid_missing");
	if (!x86_64_xendump_load_page(kvaddr, xd))
		goto err;

	p2m_mid_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));

	kvaddr = symbol_value("p2m_top");
	if (!x86_64_xendump_load_page(kvaddr, xd))
		goto err;

	kvaddr = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (!x86_64_xendump_load_page(kvaddr, xd))
		goto err;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, xd->page,
			"contents of p2m_top page:");

	/* Copy p2m_top since xd->page is reused by each load below. */
	p2m_top = (ulong *)GETBUF(PAGESIZE());
	memcpy(p2m_top, xd->page, PAGESIZE());

	for (i = 0; i < XEN_P2M_TOP_PER_PAGE; ++i) {
		p2m_frame = i * XEN_P2M_MID_PER_PAGE;

		if (p2m_frame >= xd->xc_core.p2m_frames)
			break;

		if (p2m_top[i] == p2m_mid_missing)
			continue;

		if (!x86_64_xendump_load_page(p2m_top[i], xd))
			goto err;

		if (CRASHDEBUG(7))
			x86_64_debug_dump_page(xd->ofp, xd->page,
				"contents of p2m_mid page:");

		p2m_mid = (ulong *)xd->page;

		for (j = 0; j < XEN_P2M_MID_PER_PAGE; ++j, ++p2m_frame) {
			if (p2m_frame >= xd->xc_core.p2m_frames)
				break;

			if (p2m_mid[j] == p2m_missing)
				continue;

			idx = x86_64_xendump_page_index(p2m_mid[j], xd);
			if (idx == MFN_NOT_FOUND)
				goto err;

			xd->xc_core.p2m_frame_index_list[p2m_frame] = idx;
		}
	}

	machdep->last_ptbl_read = 0;
	ret = TRUE;

err:
	if (p2m_top)
		FREEBUF(p2m_top);
	return ret;
}

/*
 *  Dump a page's contents as 256 pairs of 64-bit values, one pair per
 *  line, prefixed by the byte offset within the page.
 */
static void
x86_64_debug_dump_page(FILE *ofp, char *page, char *name)
{
	int i;
	ulong *up;

	fprintf(ofp, "%s\n", name);

	up = (ulong *)page;
	for (i = 0; i < 256; i++) {
		fprintf(ofp, "%016lx: %016lx %016lx\n",
			(ulong)((i * 2) * sizeof(ulong)),
			*up, *(up+1));
		up += 2;
	}
}

/*
 *  Find the page associated with the kvaddr, and read its contents
 *  into the passed-in buffer.
 */
static char *
x86_64_xendump_load_page(ulong kvaddr, struct xendump_data *xd)
{
	ulong mfn;
	ulong *pml4, *pgd, *pmd, *ptep;

	/* 4-level guest page-table walk; each level caches its page. */
	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pml4: %lx mfn: %lx pml4_index: %lx\n",
			kvaddr, *pml4, mfn, pml4_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->pgd))
		error(FATAL, "cannot read/find pud page\n");
	machdep->last_pgd_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->pgd,
			"contents of page upper directory page:");

	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n",
			kvaddr, *pgd, mfn, pgd_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->pmd))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->pmd,
			"contents of page middle directory page:");

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n",
			kvaddr, *pmd, mfn, pmd_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->ptbl))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->ptbl,
			"contents of page table page:");

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n",
			kvaddr, *ptep, mfn, pte_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, xd->page))
		error(FATAL, "cannot read/find pte page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, xd->page,
			"contents of page:");

	return xd->page;
}

/*
 *  Find the dumpfile page index associated with the kvaddr.
 */
static int
x86_64_xendump_page_index(ulong kvaddr, struct xendump_data *xd)
{
	int idx;
	ulong mfn;
	ulong *pml4, *pgd, *pmd, *ptep;

	/* Same 4-level walk as the load-page path, but skipping reads
	 * of levels whose mfn matches the cached last_*_read value. */
	pml4 = ((ulong *)machdep->machspec->pml4) + pml4_index(kvaddr);
	mfn = ((*pml4) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pgd_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pgd))
		error(FATAL, "cannot read/find pud page\n");
	machdep->last_pgd_read = mfn;

	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pmd_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pmd))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
			kvaddr);

	return idx;
}

/*
 *  Pull the rsp from the cpu_user_regs struct in the header
 *  turn it into a task, and match it with the active_set.
 *  Unfortunately, the registers in the vcpu_guest_context
 *  are not necessarily those of the panic task, so for now
 *  let get_active_set_panic_task() get the right task.
*/ static ulong x86_64_xendump_panic_task(struct xendump_data *xd) { int i; ulong rsp; off_t offset; ulong task; if (INVALID_MEMBER(vcpu_guest_context_user_regs) || INVALID_MEMBER(cpu_user_regs_esp)) return NO_TASK; offset = xd->xc_core.header.xch_ctxt_offset + (off_t)OFFSET(vcpu_guest_context_user_regs) + (off_t)OFFSET(cpu_user_regs_rsp); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return NO_TASK; if (read(xd->xfd, &rsp, sizeof(ulong)) != sizeof(ulong)) return NO_TASK; if (IS_KVADDR(rsp) && (task = stkptr_to_task(rsp))) { for (i = 0; i < NR_CPUS; i++) { if (task == tt->active_set[i]) { if (CRASHDEBUG(0)) error(INFO, "x86_64_xendump_panic_task: rsp: %lx -> task: %lx\n", rsp, task); return task; } } error(WARNING, "x86_64_xendump_panic_task: rsp: %lx -> task: %lx (not active)\n", rsp); } return NO_TASK; } /* * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() * instantiations, the registers in the vcpu_guest_context are not * necessarily those of the panic task. Furthermore, the rsp is * seemingly unassociated with the task, presumably due a hypervisor * callback, so only accept the contents if they retfer to the panic * task's stack. 
*/ static void x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) { ulong task, xrip, xrsp; off_t offset; struct syment *sp; char *rip_symbol; int cpu; if (INVALID_MEMBER(vcpu_guest_context_user_regs) || INVALID_MEMBER(cpu_user_regs_rip) || INVALID_MEMBER(cpu_user_regs_rsp)) goto generic; offset = xd->xc_core.header.xch_ctxt_offset + (off_t)OFFSET(vcpu_guest_context_user_regs) + (off_t)OFFSET(cpu_user_regs_rsp); if (lseek(xd->xfd, offset, SEEK_SET) == -1) goto generic; if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong)) goto generic; offset = xd->xc_core.header.xch_ctxt_offset + (off_t)OFFSET(vcpu_guest_context_user_regs) + (off_t)OFFSET(cpu_user_regs_rip); if (lseek(xd->xfd, offset, SEEK_SET) == -1) goto generic; if (read(xd->xfd, &xrip, sizeof(ulong)) != sizeof(ulong)) goto generic; /* * This works -- comes from smp_send_stop call in panic. * But xendump_panic_hook() will forestall this function * from being called (for now). */ if (IS_KVADDR(xrsp) && (task = stkptr_to_task(xrsp)) && (task == bt->task)) { if (CRASHDEBUG(1)) fprintf(xd->ofp, "hooks from vcpu_guest_context: rip: %lx rsp: %lx\n", xrip, xrsp); *rip = xrip; *rsp = xrsp; return; } generic: machdep->get_stack_frame(bt, rip, rsp); /* * If this is an active task showing itself in schedule(), * then the thread_struct rsp is stale. It has to be coming * from a callback via the interrupt stack. 
*/ if (is_task_active(bt->task) && (rip_symbol = closest_symbol(*rip)) && (STREQ(rip_symbol, "thread_return") || STREQ(rip_symbol, "schedule"))) { cpu = bt->tc->processor; xrsp = machdep->machspec->stkinfo.ibase[cpu] + machdep->machspec->stkinfo.isize - sizeof(ulong); while (readmem(xrsp, KVADDR, &xrip, sizeof(ulong), "xendump rsp", RETURN_ON_ERROR)) { if ((sp = value_search(xrip, (ulong *)&offset)) && STREQ(sp->name, "smp_really_stop_cpu") && offset) { *rip = xrip; *rsp = xrsp; if (CRASHDEBUG(1)) error(INFO, "switch thread_return to smp_call_function_interrupt\n"); break; } xrsp -= sizeof(ulong); if (xrsp <= machdep->machspec->stkinfo.ibase[cpu]) break; } } } /* for XEN Hypervisor analysis */ static int x86_64_is_kvaddr_hyper(ulong addr) { return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); } static ulong x86_64_get_stackbase_hyper(ulong task) { struct xen_hyper_vcpu_context *vcc; struct xen_hyper_pcpu_context *pcc; ulong rsp0, base; /* task means vcpu here */ vcc = xen_hyper_vcpu_to_vcpu_context(task); if (!vcc) error(FATAL, "invalid vcpu\n"); pcc = xen_hyper_id_to_pcpu_context(vcc->processor); if (!pcc) error(FATAL, "invalid pcpu number\n"); rsp0 = pcc->sp.rsp0; base = rsp0 & (~(STACKSIZE() - 1)); return base; } static ulong x86_64_get_stacktop_hyper(ulong task) { return x86_64_get_stackbase_hyper(task) + STACKSIZE(); } #define EXCEPTION_STACKSIZE_HYPER (1024UL) static ulong x86_64_in_exception_stack_hyper(ulong vcpu, ulong rsp) { struct xen_hyper_vcpu_context *vcc; struct xen_hyper_pcpu_context *pcc; int i; ulong stackbase; vcc = xen_hyper_vcpu_to_vcpu_context(vcpu); if (!vcc) error(FATAL, "invalid vcpu\n"); pcc = xen_hyper_id_to_pcpu_context(vcc->processor); if (!pcc) error(FATAL, "invalid pcpu number\n"); for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++) { if (pcc->ist[i] == 0) { continue; } stackbase = pcc->ist[i] - EXCEPTION_STACKSIZE_HYPER; if ((rsp & ~(EXCEPTION_STACKSIZE_HYPER - 1)) == stackbase) { return stackbase; } } return 0; } static 
/*
 * Get the starting PC/SP pair for a Xen hypervisor backtrace, taken from
 * the dumpinfo note registers of the pcpu the vcpu was running on.
 */
void
x86_64_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong *regs;
	ulong rsp, rip;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* "bt -T" only needs the stack base; no starting PC is required. */
	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
		if (spp)
			*spp = x86_64_get_stackbase_hyper(bt->task);
		if (pcp)
			*pcp = 0;
		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
		return;
	}

	/* Pull RSP/RIP from the ELF note register set saved for this pcpu. */
	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
	rsp = XEN_HYPER_X86_64_NOTE_RSP(regs);
	rip = XEN_HYPER_X86_64_NOTE_RIP(regs);

	if (spp) {
		/* Accept rsp if it is on an exception stack or within the
		 * process stack bounds; otherwise fall back to the stack base. */
		if (x86_64_in_exception_stack_hyper(bt->task, rsp))
			*spp = rsp;
		else if (rsp < x86_64_get_stackbase_hyper(bt->task) ||
			 rsp >= x86_64_get_stacktop_hyper(bt->task))
			*spp = x86_64_get_stackbase_hyper(bt->task);
		else
			*spp = rsp;
	}
	if (pcp) {
		if (is_kernel_text(rip))
			*pcp = rip;
		else
			*pcp = 0;
	}
}

/*
 * Print one hypervisor backtrace entry if "text" resolves to a symbol.
 * Returns BACKTRACE_ENTRY_DISPLAYED, BACKTRACE_ENTRY_IGNORED (no symbol),
 * or BACKTRACE_COMPLETE when the bottom frame (syscall_enter) is reached.
 */
static int
x86_64_print_stack_entry_hyper(struct bt_info *bt, FILE *ofp, int level,
	int stkindex, ulong text)
{
	ulong rsp, offset;
	struct syment *sp;
	char *name, *name_plus_offset;
	int result;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	offset = 0;
	sp = value_search(text, &offset);
	if (!sp)
		return BACKTRACE_ENTRY_IGNORED;

	name = sp->name;

	/* "bt -s" wants symbol+offset rather than the bare symbol name. */
	if (offset && (bt->flags & BT_SYMBOL_OFFSET))
		name_plus_offset = value_to_symstr(text, buf2, bt->radix);
	else
		name_plus_offset = NULL;

	/* syscall_enter marks the bottom of a hypervisor stack trace. */
	if (STREQ(name, "syscall_enter"))
		result = BACKTRACE_COMPLETE;
	else
		result = BACKTRACE_ENTRY_DISPLAYED;

	/* Stack address of this entry, derived from its word index. */
	rsp = bt->stackbase + (stkindex * sizeof(long));

	if ((bt->flags & BT_FULL)) {
		if (bt->frameptr)
			x86_64_display_full_frame(bt, rsp, ofp);
		bt->frameptr = rsp + sizeof(ulong);
	}
	fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level,
		rsp, name_plus_offset ? name_plus_offset : name, text);

	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(text, buf1, FALSE);
		if (strlen(buf1))
			fprintf(ofp, " %s\n", buf1);
	}

	if (BT_REFERENCE_CHECK(bt))
		x86_64_do_bt_reference_check(bt, text, name);

	return result;
}

/*
 * Dump the exception-frame register set found at the top of the current
 * stack buffer.  The frame occupies the last 21 words of the stack; the
 * up[] indices map to registers per the fprintf labels below (e.g.
 * up[16]=RIP, up[19]=RSP, up[18]=RFLAGS).
 */
static void
x86_64_print_eframe_regs_hyper(struct bt_info *bt)
{
	ulong *up;
	ulong offset;
	struct syment *sp;

	/* Point at the 21-word register frame at the very top of the stack. */
	up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
	up -= 21;

	fprintf(fp, " [exception RIP: ");
	if ((sp = value_search(up[16], &offset))) {
		fprintf(fp, "%s", sp->name);
		if (offset)
			fprintf(fp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld",
				offset);
	} else
		fprintf(fp, "unknown or invalid address");
	fprintf(fp, "]\n");
	fprintf(fp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n",
		up[16], up[19], up[18]);
	fprintf(fp, " RAX: %016lx RBX: %016lx RCX: %016lx\n",
		up[10], up[5], up[11]);
	fprintf(fp, " RDX: %016lx RSI: %016lx RDI: %016lx\n",
		up[12], up[13], up[14]);
	fprintf(fp, " RBP: %016lx R8: %016lx R9: %016lx\n",
		up[4], up[9], up[8]);
	fprintf(fp, " R10: %016lx R11: %016lx R12: %016lx\n",
		up[7], up[6], up[3]);
	fprintf(fp, " R13: %016lx R14: %016lx R15: %016lx\n",
		up[2], up[1], up[0]);
	fprintf(fp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n",
		up[15], up[17], up[20]);
	fprintf(fp, "--- ---\n");
}

/*
 * simple back tracer for xen hypervisor
 * irq stack does not exist. so relative easy.
*/ static void x86_64_simple_back_trace_cmd_hyper(struct bt_info *bt_in) { int i, level, done; ulong rsp, estack, stacktop; ulong *up; FILE *ofp; struct bt_info bt_local, *bt; char ebuf[EXCEPTION_STACKSIZE_HYPER]; bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); if (bt->flags & BT_FRAMESIZE_DEBUG) { error(INFO, "-F not support\n"); return; } level = 0; done = FALSE; bt->call_target = NULL; rsp = bt->stkptr; if (!rsp) { error(INFO, "cannot determine starting stack pointer\n"); return; } if (BT_REFERENCE_CHECK(bt)) ofp = pc->nullfp; else ofp = fp; while ((estack = x86_64_in_exception_stack_hyper(bt->task, rsp))) { bt->flags |= BT_EXCEPTION_STACK; bt->stackbase = estack; bt->stacktop = estack + EXCEPTION_STACKSIZE_HYPER; bt->stackbuf = ebuf; if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, "exception stack contents", RETURN_ON_ERROR)) error(FATAL, "read of exception stack at %lx failed\n", bt->stackbase); stacktop = bt->stacktop - 168; for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) { case BACKTRACE_ENTRY_DISPLAYED: level++; break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } if (!BT_REFERENCE_CHECK(bt)) x86_64_print_eframe_regs_hyper(bt); up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); up -= 2; rsp = bt->stkptr = *up; up -= 3; bt->instptr = *up; done = FALSE; bt->frameptr = 0; } if (bt->flags & BT_EXCEPTION_STACK) { bt->flags &= ~BT_EXCEPTION_STACK; bt->stackbase = bt_in->stackbase; bt->stacktop = bt_in->stacktop; bt->stackbuf = bt_in->stackbuf; } for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry_hyper(bt, 
ofp, level, i,*up)) { case BACKTRACE_ENTRY_DISPLAYED: level++; break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } } static void x86_64_init_hyper(int when) { switch (when) { case PRE_SYMTAB: machdep->verify_symbol = x86_64_verify_symbol; machdep->machspec = &x86_64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; if ((machdep->machspec->upml = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc upml space."); if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); if ((machdep->machspec->pml4 = (char *)malloc(PAGESIZE()*2)) == NULL) error(FATAL, "cannot malloc pml4 space."); machdep->machspec->last_upml_read = 0; machdep->machspec->last_pml4_read = 0; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; if (machdep->cmdline_args[0]) parse_cmdline_args(); break; case PRE_GDB: machdep->machspec->page_offset = PAGE_OFFSET_XEN_HYPER; machdep->kvbase = (ulong)HYPERVISOR_VIRT_START; machdep->identity_map_base = (ulong)PAGE_OFFSET_XEN_HYPER; machdep->is_kvaddr = x86_64_is_kvaddr_hyper; machdep->is_uvaddr = x86_64_is_uvaddr; machdep->eframe_search = x86_64_eframe_search; machdep->back_trace = x86_64_simple_back_trace_cmd_hyper; machdep->processor_speed = x86_64_processor_speed; machdep->kvtop = x86_64_kvtop; machdep->get_task_pgd = x86_64_get_task_pgd; machdep->get_stack_frame = x86_64_get_stack_frame_hyper; 
machdep->get_stackbase = x86_64_get_stackbase_hyper; machdep->get_stacktop = x86_64_get_stacktop_hyper; machdep->translate_pte = x86_64_translate_pte; machdep->memory_size = xen_hyper_x86_memory_size; /* KAK add */ machdep->is_task_addr = x86_64_is_task_addr; machdep->dis_filter = x86_64_dis_filter; machdep->cmd_mach = x86_64_cmd_mach; machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; /* KAK add */ machdep->line_number_hooks = x86_64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = x86_64_init_kernel_pgd; machdep->clear_machdep_cache = x86_64_clear_machdep_cache; /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; break; case POST_GDB: XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); XEN_HYPER_ASSIGN_OFFSET(tss_struct_rsp0) = MEMBER_OFFSET("tss_struct", "__blh") + sizeof(short unsigned int); XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist"); if (symbol_exists("cpu_data")) { xht->cpu_data_address = symbol_value("cpu_data"); } /* KAK Can this be calculated? 
*/ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: break; } } struct framesize_cache { ulong textaddr; int framesize; int exception; }; static struct framesize_cache *x86_64_framesize_cache = NULL; static int framesize_cache_entries = 0; #define FRAMESIZE_QUERY (1) #define FRAMESIZE_ENTER (2) #define FRAMESIZE_DUMP (3) #define FRAMESIZE_CACHE_INCR (50) static int x86_64_framesize_cache_resize(void) { int i; struct framesize_cache *new_fc, *fc; if ((new_fc = realloc(x86_64_framesize_cache, (framesize_cache_entries+FRAMESIZE_CACHE_INCR) * sizeof(struct framesize_cache))) == NULL) { error(INFO, "cannot realloc x86_64_framesize_cache space!\n"); return FALSE; } fc = new_fc + framesize_cache_entries; for (i = framesize_cache_entries; i < (framesize_cache_entries+FRAMESIZE_CACHE_INCR); fc++, i++) { fc->textaddr = 0; fc->framesize = 0; fc->exception = 0; } x86_64_framesize_cache = new_fc; framesize_cache_entries += FRAMESIZE_CACHE_INCR; return TRUE; } static int x86_64_framesize_cache_func(int cmd, ulong textaddr, int *framesize, int exception) { int i, n; struct framesize_cache *fc; char buf[BUFSIZE]; if (!x86_64_framesize_cache) { framesize_cache_entries = FRAMESIZE_CACHE_INCR; if ((x86_64_framesize_cache = calloc(framesize_cache_entries, sizeof(struct framesize_cache))) == NULL) error(FATAL, "cannot calloc x86_64_framesize_cache space!\n"); } switch (cmd) { case FRAMESIZE_QUERY: fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if (fc->textaddr == textaddr) { if (fc->exception != exception) return FALSE; *framesize = fc->framesize; return TRUE; } } return FALSE; case FRAMESIZE_ENTER: retry: fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if ((fc->textaddr == 0) || (fc->textaddr == textaddr)) { if (*framesize == -1) { fc->textaddr = 0; fc->framesize = 0; fc->exception = 0; for (n = i+1; n < framesize_cache_entries; i++, n++) x86_64_framesize_cache[i] = 
x86_64_framesize_cache[n]; return 0; } fc->textaddr = textaddr; fc->framesize = *framesize; fc->exception = exception; return fc->framesize; } } if (x86_64_framesize_cache_resize()) goto retry; return *framesize; case FRAMESIZE_DUMP: fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if (fc->textaddr == 0) { if (i < (framesize_cache_entries-1)) { fprintf(fp, "[%d-%d]: (unused)\n", i, framesize_cache_entries-1); } break; } fprintf(fp, "[%3d]: %lx %3d %s (%s)\n", i, fc->textaddr, fc->framesize, fc->exception ? "EX" : "CF", value_to_symstr(fc->textaddr, buf, 0)); } break; } return TRUE; } ulong x86_64_get_framepointer(struct bt_info *bt, ulong rsp) { ulong stackptr, framepointer, retaddr; framepointer = 0; stackptr = rsp - sizeof(ulong); if (!INSTACK(stackptr, bt)) return 0; if (!readmem(stackptr, KVADDR, &framepointer, sizeof(ulong), "framepointer", RETURN_ON_ERROR|QUIET)) return 0; if (!INSTACK(framepointer, bt)) return 0; if (framepointer <= (rsp+sizeof(ulong))) return 0; if (!readmem(framepointer + sizeof(ulong), KVADDR, &retaddr, sizeof(ulong), "return address", RETURN_ON_ERROR|QUIET)) return 0; if (!is_kernel_text(retaddr)) return 0; return framepointer; } int search_for_eframe_target_caller(struct bt_info *bt, ulong stkptr, int *framesize) { int i; ulong *up, offset, rsp; struct syment *sp1, *sp2; char *called_function; if ((sp1 = value_search(bt->eframe_ip, &offset))) called_function = sp1->name; else return FALSE; rsp = stkptr; for (i = (rsp - bt->stackbase)/sizeof(ulong); rsp < bt->stacktop; i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; if (!(sp1 = value_search(*up, &offset))) continue; if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE)) continue; /* * Get the syment of the function that the text * routine above called before leaving its return * address on the stack -- if it can be determined. 
*/ if ((sp2 = x86_64_function_called_by((*up)-5))) { if (STREQ(sp2->name, called_function)) { if (CRASHDEBUG(1)) { fprintf(fp, "< %lx/%s rsp: %lx caller: %s >\n", bt->eframe_ip, called_function, stkptr, sp1->name); } *framesize = rsp - stkptr; return TRUE; } } } return FALSE; } #define BT_FRAMESIZE_IGNORE_MASK \ (BT_OLD_BACK_TRACE|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL|BT_FRAMESIZE_DISABLE) static int x86_64_get_framesize(struct bt_info *bt, ulong textaddr, ulong rsp) { int c, framesize, instr, arg, max; struct syment *sp; long max_instructions; ulong offset; char buf[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS]; ulong locking_func, textaddr_save, current, framepointer; char *p1, *p2; int reterror; int arg_exists; int exception; if (!(bt->flags & BT_FRAMESIZE_DEBUG)) { if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) || (kt->flags & USE_OLD_BT)) return 0; } if (!(sp = value_search(textaddr, &offset))) { if (!(bt->flags & BT_FRAMESIZE_DEBUG)) bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } exception = bt->eframe_ip == textaddr ? TRUE : FALSE; if ((bt->flags & BT_EFRAME_TARGET) && search_for_eframe_target_caller(bt, rsp, &framesize)) return framesize; if (!(bt->flags & BT_FRAMESIZE_DEBUG) && x86_64_framesize_cache_func(FRAMESIZE_QUERY, textaddr, &framesize, exception)) { if (framesize == -1) bt->flags |= BT_FRAMESIZE_DISABLE; return framesize; } /* * Bait and switch an incoming .text.lock address * with the containing function's address. */ if (STRNEQ(sp->name, ".text.lock.") && (locking_func = text_lock_function(sp->name, bt, textaddr))) { if (!(sp = value_search(locking_func, &offset))) { bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } textaddr_save = textaddr; textaddr = locking_func; } else textaddr_save = 0; /* * As of 2.6.29, "irq_entries_start" replaced the range of IRQ * entry points named IRQ0x00_interrupt through IRQ0x##_interrupt. * Each IRQ entry point in the list of non-symbolically-named * entry stubs consists of a single pushq and a jmp. 
*/ if (STREQ(sp->name, "irq_entries_start")) { #define PUSH_IMM8 0x6a if (readmem(textaddr, KVADDR, &instr, sizeof(short), "irq_entries_start instruction", QUIET|RETURN_ON_ERROR) && ((instr & 0xff) == PUSH_IMM8)) framesize = 0; else framesize = 8; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, exception)); } if ((machdep->flags & FRAMEPOINTER) && rsp && !exception && !textaddr_save) { framepointer = x86_64_get_framepointer(bt, rsp); if (CRASHDEBUG(3)) { if (framepointer) fprintf(fp, " rsp: %lx framepointer: %lx -> %ld\n", rsp, framepointer, framepointer - rsp); else fprintf(fp, " rsp: %lx framepointer: (unknown)\n", rsp); } if (framepointer) { framesize = framepointer - rsp; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, 0)); } } if ((sp->value >= kt->init_begin) && (sp->value < kt->init_end)) return 0; framesize = max = 0; max_instructions = textaddr - sp->value; instr = arg = -1; open_tmpfile2(); sprintf(buf, "x/%ldi 0x%lx", max_instructions, sp->value); if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { close_tmpfile2(); bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { strcpy(buf2, buf); if (CRASHDEBUG(3)) fprintf(fp, "%s", buf2); c = parse_line(buf, arglist); if (instr == -1) { /* * Check whether are * in the output string. 
*/ if (LASTCHAR(arglist[0]) == ':') { instr = 1; arg = 2; } else { instr = 2; arg = 3; } } if (c < (instr+1)) continue; else if (c >= (arg+1)) arg_exists = TRUE; else arg_exists = FALSE; reterror = 0; current = htol(strip_ending_char(arglist[0], ':'), RETURN_ON_ERROR, &reterror); if (reterror) continue; if (current > textaddr) break; else if ((current == textaddr) && !exception) break; if (STRNEQ(arglist[instr], "push")) { framesize += 8; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); max = framesize; } else if (STRNEQ(arglist[instr], "pop") || STRNEQ(arglist[instr], "leaveq")) { if (framesize > 0) framesize -= 8; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (arg_exists && STRNEQ(arglist[instr], "add") && (p1 = strstr(arglist[arg], ",%rsp"))) { *p1 = NULLCHAR; p2 = arglist[arg]; reterror = 0; offset = htol(p2+1, RETURN_ON_ERROR, &reterror); if (reterror) continue; if (framesize > 0) framesize -= offset; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (arg_exists && STRNEQ(arglist[instr], "sub") && (p1 = strstr(arglist[arg], ",%rsp"))) { *p1 = NULLCHAR; p2 = arglist[arg]; reterror = 0; offset = htol(p2+1, RETURN_ON_ERROR, &reterror); if (reterror) continue; framesize += offset; max = framesize; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (STRNEQ(arglist[instr], "retq")) { if (!exception) { framesize = max; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize restored to: %d]\n", strip_linefeeds(buf2), max); } } else if (STRNEQ(arglist[instr], "retq_NOT_CHECKED")) { bt->flags |= BT_FRAMESIZE_DISABLE; framesize = -1; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, 
"%s\t[framesize: DISABLED]\n", strip_linefeeds(buf2)); break; } } close_tmpfile2(); if (textaddr_save) textaddr = textaddr_save; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, exception)); } static void x86_64_framesize_debug(struct bt_info *bt) { int framesize; int exception; exception = (bt->flags & BT_EFRAME_SEARCH); switch (bt->hp->esp) { case 1: /* "dump" */ x86_64_framesize_cache_func(FRAMESIZE_DUMP, 0, NULL, 0); break; case 0: if (bt->hp->eip) { /* clear one entry */ framesize = -1; x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, &framesize, exception); } else { /* clear all entries */ BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); fprintf(fp, "framesize cache cleared\n"); } break; case -1: if (!bt->hp->eip) error(INFO, "x86_64_framesize_debug: ignoring command\n"); else x86_64_get_framesize(bt, bt->hp->eip, 0); break; case -3: machdep->flags |= FRAMEPOINTER; BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); fprintf(fp, "framesize cache cleared and FRAMEPOINTER turned ON\n"); break; case -4: machdep->flags &= ~FRAMEPOINTER; BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); fprintf(fp, "framesize cache cleared and FRAMEPOINTER turned OFF\n"); break; default: if (bt->hp->esp > 1) { framesize = bt->hp->esp; if (bt->hp->eip) x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, &framesize, exception); } else error(INFO, "x86_64_framesize_debug: ignoring command\n"); break; } } /* * The __schedule() framesize should only have to be calculated * one time, but always verify that the previously-determined * framesize applies to this task, and if it doesn't, recalculate. * Update the bt->instptr here, and return the new stack pointer. 
*/
/*
 * Advance a stack pointer past the __schedule() frame of a sleeping task.
 * First tries the cached framesize keyed on thread_return; if the cached
 * offset does not land on a return address into __schedule(), rescans the
 * stack word-by-word and re-enters the framesize into the cache.  Updates
 * bt->instptr to the discovered return address and returns the adjusted
 * stack pointer (or rsp_in unchanged on failure).
 */
static ulong
__schedule_frame_adjust(ulong rsp_in, struct bt_info *bt)
{
	int i, found;
	ulong rsp, *up;
	struct syment *sp;
	int framesize;

	if (!INSTACK(rsp_in, bt))
		error(FATAL,
		    "invalid RSP: %lx bt->stackbase/stacktop: %lx/%lx cpu: %d\n",
			rsp_in, bt->stackbase, bt->stacktop,
			bt->tc->processor);

	/* Fast path: use the previously-cached __schedule framesize. */
	if (x86_64_framesize_cache_func(FRAMESIZE_QUERY,
	    machdep->machspec->thread_return, &framesize, 0)) {
		rsp = rsp_in + framesize;
		i = (rsp - bt->stackbase)/sizeof(ulong);
		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);
		/* Verify the cached offset lands on a call into __schedule
		 * (the call instruction is 5 bytes before the return address). */
		if (is_kernel_text_offset(*up) &&
		    (sp = x86_64_function_called_by((*up)-5)) &&
		    STREQ(sp->name, "__schedule")) {
			bt->instptr = *up;
			return (rsp);
		}
	}

	/* Slow path: scan upward for a return address into __schedule. */
	rsp = rsp_in;

	for (found = FALSE, i = (rsp - bt->stackbase)/sizeof(ulong);
	     rsp < bt->stacktop; i++, rsp += sizeof(ulong)) {
		up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]);

		if (!is_kernel_text_offset(*up))
			continue;

		if ((sp = x86_64_function_called_by((*up)-5)) &&
		    (STREQ(sp->name, "__schedule"))) {
			framesize = (int)(rsp - rsp_in);
			bt->instptr = *up;
			/* Cache the measured framesize for the next task. */
			x86_64_framesize_cache_func(FRAMESIZE_ENTER,
			    machdep->machspec->thread_return,
			    &framesize, 0);
			bt->instptr = *up;
			found = TRUE;
			break;
		}
	}

	if (CRASHDEBUG(1) && !found)
		error(INFO, "cannot determine __schedule() caller\n");

	return (found ? rsp : rsp_in);
}

/*
 * Cross-check the per-cpu active task list (from the runqueues) against
 * the x8664_pda/current_task values, reporting and -- when the crash-NMI
 * stack pointer confirms it -- repairing inconsistencies in tt->active_set.
 * Dumpfile-only: returns immediately on a live system.
 */
static void
x86_64_get_active_set(void)
{
	int c;
	ulong current;
	struct task_context *actctx, *curctx;
	struct machine_specific *ms;

	if (ACTIVE())
		return;

	ms = machdep->machspec;
	/* ms->current holds the per-cpu current_task/x8664_pda task values. */
	if (!ms->current)
		return;

	if (CRASHDEBUG(1))
		fprintf(fp, "x86_64_get_active_set: runqueue vs. %s\n",
			VALID_STRUCT(x8664_pda) ?
			"x8664_pda" : "current_task");

	for (c = 0; c < kt->cpus; c++) {
		if (!tt->active_set[c])
			continue;

		current = ms->current[c];
		curctx = task_to_context(current);
		actctx = task_to_context(tt->active_set[c]);

		if (CRASHDEBUG(1))
			fprintf(fp, " [%d]: %016lx %016lx %s%s\n", c,
				tt->active_set[c], current,
				curctx ? "" : "(invalid task)",
				curctx && (curctx->processor != c) ?
				"(wrong processor)" : "");
		/* Skip cpus whose per-cpu task value is invalid or misplaced. */
		if (!curctx || (curctx->processor != c))
			continue;
		/* No conflict if both sources agree, or the runqueue task panicked. */
		if (tt->active_set[c] == current)
			continue;
		if (tt->active_set[c] == tt->panic_task)
			continue;
		/* If the per-cpu task's stack holds the crash-NMI rsp, trust it. */
		if (stkptr_to_task(ms->crash_nmi_rsp[c]) == curctx->task)
			tt->active_set[c] = tt->panic_threads[c] = current;
		error(INFO,
		    "inconsistent active task indications for CPU %d:\n", c);
		error(CONT, " %srunqueue: %lx \"%s\" (default)\n",
			VALID_STRUCT(x8664_pda) ? "" : " ",
			actctx->task, actctx->comm);
		error(CONT, "%s: %lx \"%s\" %s\n%s",
			VALID_STRUCT(x8664_pda) ? " x8664_pda" : "current_task",
			current, curctx->comm,
			tt->active_set[c] == current ? "(reassigned)" : "",
			CRASHDEBUG(1) ? "" : "\n");
	}
}

/* qsort comparator: order vaddr_range entries by ascending start address. */
static int
compare_kvaddr(const void *v1, const void *v2)
{
	struct vaddr_range *r1, *r2;

	r1 = (struct vaddr_range *)v1;
	r2 = (struct vaddr_range *)v2;

	return (r1->start < r2->start ? -1 :
		r1->start == r2->start ? 0 : 1);
}

/*
 * Populate the vaddr_range array with a sorted list of
 * kernel virtual address ranges. The caller is responsible
 * for ensuring that the array is large enough, so it should
 * first call this function with a NULL vaddr_range pointer,
 * which will return the count of kernel virtual address
 * space ranges.
 */
static int
x86_64_get_kvaddr_ranges(struct vaddr_range *vrp)
{
	int cnt;
	ulong start;

	cnt = 0;

	/* Direct-mapped (unity) region. */
	vrp[cnt].type = KVADDR_UNITY_MAP;
	vrp[cnt].start = machdep->machspec->page_offset;
	vrp[cnt++].end = vt->high_memory;

	/* Kernel text/data mapping. */
	vrp[cnt].type = KVADDR_START_MAP;
	vrp[cnt].start = __START_KERNEL_map;
	vrp[cnt++].end = kt->end;

	/* vmalloc region. */
	vrp[cnt].type = KVADDR_VMALLOC;
	vrp[cnt].start = machdep->machspec->vmalloc_start_addr;
	vrp[cnt++].end = last_vmalloc_address();

	/*
	 * Verify that these two regions stand alone.
*/ if (st->mods_installed) { start = lowest_module_address(); if (!in_vmlist_segment(start)) { vrp[cnt].type = KVADDR_MODULES; vrp[cnt].start = start; vrp[cnt++].end = roundup(highest_module_address(), PAGESIZE()); } } if (machdep->flags & VMEMMAP) { start = machdep->machspec->vmemmap_vaddr; if (!in_vmlist_segment(start)) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = start; vrp[cnt++].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); } } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } /* * Determine the physical memory range reserved for GART. */ static void GART_init(void) { char resource[BUFSIZE]; struct syment *sp; struct machine_specific *ms; if (!(sp = kernel_symbol_search("gart_resource"))) return; STRUCT_SIZE_INIT(resource, "resource"); MEMBER_OFFSET_INIT(resource_start, "resource", "start"); MEMBER_OFFSET_INIT(resource_end, "resource", "end"); if (VALID_STRUCT(resource) && VALID_MEMBER(resource_start) && VALID_MEMBER(resource_end)) { if (!readmem(sp->value, KVADDR, resource, SIZE(resource), "GART resource", RETURN_ON_ERROR)) return; ms = machdep->machspec; ms->GART_start = ULONG(resource + OFFSET(resource_start)); ms->GART_end = ULONG(resource + OFFSET(resource_end)); if (ms->GART_start && ms->GART_end) { machdep->flags |= GART_REGION; if (CRASHDEBUG(1)) fprintf(fp, "GART address range: %lx - %lx\n", ms->GART_start, ms->GART_end); } } } static int x86_64_verify_paddr(uint64_t paddr) { struct machine_specific *ms; if (machdep->flags & GART_REGION) { ms = machdep->machspec; if (ms->GART_start && ms->GART_end && (paddr >= ms->GART_start) && (paddr <= ms->GART_end)) return FALSE; } return TRUE; } #endif /* X86_64 */ crash-7.1.4/s390x.c0000775000000000000000000013473112634305150012370 0ustar rootroot/* s390.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. 
* Copyright (C) 2002-2006, 2009-2014 David Anderson * Copyright (C) 2002-2006, 2009-2014 Red Hat, Inc. All rights reserved. * Copyright (C) 2005, 2006, 2010-2013 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef S390X #include #include "defs.h" #include "netdump.h" #define S390X_WORD_SIZE 8 #define S390X_PAGE_BASE_MASK (~((1ULL<<12)-1)) /* Flags used in entries of page dirs and page tables. */ #define S390X_PAGE_PRESENT 0x001ULL /* set: loaded in physical memory * clear: not loaded in physical mem */ #define S390X_PAGE_RO 0x200ULL /* HW read-only */ #define S390X_PAGE_INVALID 0x400ULL /* HW invalid */ #define S390X_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ #define S390X_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ /* bits 52, 55 must contain zeroes in a pte */ #define S390X_PTE_INVALID_MASK 0x900ULL #define S390X_PTE_INVALID(x) ((x) & S390X_PTE_INVALID_MASK) #define INT_STACK_SIZE STACKSIZE() // can be 8192 or 16384 #define KERNEL_STACK_SIZE STACKSIZE() // can be 8192 or 16384 #define LOWCORE_SIZE 8192 #define VX_SA_SIZE (32 * 16) #define S390X_PSW_MASK_PSTATE 0x0001000000000000UL /* * S390x prstatus ELF Note */ struct s390x_nt_prstatus { uint8_t pad1[32]; uint32_t pr_pid; uint8_t pad2[76]; uint64_t psw[2]; uint64_t gprs[16]; uint32_t acrs[16]; uint64_t orig_gpr2; uint32_t pr_fpvalid; uint8_t pad3[4]; } __attribute__ ((packed)); /* * S390x floating point register ELF Note */ #ifndef NT_FPREGSET #define NT_FPREGSET 0x2 #endif struct s390x_nt_fpregset { uint32_t fpc; uint32_t pad; 
uint64_t fprs[16]; } __attribute__ ((packed)); struct s390x_vxrs { uint64_t low; uint64_t high; } __attribute__ ((packed)); /* * s390x CPU info */ struct s390x_cpu { uint64_t gprs[16]; uint64_t ctrs[16]; uint32_t acrs[16]; uint64_t fprs[16]; uint32_t fpc; uint64_t psw[2]; uint32_t prefix; uint64_t timer; uint64_t todcmp; uint32_t todpreg; uint64_t vxrs_low[16]; struct s390x_vxrs vxrs_high[16]; }; /* * declarations of static functions */ static void s390x_print_lowcore(char*, struct bt_info*,int); static int s390x_kvtop(struct task_context *, ulong, physaddr_t *, int); static int s390x_uvtop(struct task_context *, ulong, physaddr_t *, int); static int s390x_vtop(unsigned long, ulong, physaddr_t*, int); static ulong s390x_vmalloc_start(void); static int s390x_is_task_addr(ulong); static int s390x_verify_symbol(const char *, ulong, char type); static ulong s390x_get_task_pgd(ulong); static int s390x_translate_pte(ulong, void *, ulonglong); static ulong s390x_processor_speed(void); static int s390x_eframe_search(struct bt_info *); static void s390x_back_trace_cmd(struct bt_info *); static void s390x_get_stack_frame(struct bt_info *, ulong *, ulong *); static int s390x_dis_filter(ulong, char *, unsigned int); static void s390x_cmd_mach(void); static int s390x_get_smp_cpus(void); static void s390x_display_machine_stats(void); static void s390x_dump_line_number(ulong); static struct line_number_hook s390x_line_number_hooks[]; static int s390x_is_uvaddr(ulong, struct task_context *); static int s390x_get_kvaddr_ranges(struct vaddr_range *); static int set_s390x_max_physmem_bits(void); /* * struct lowcore name (old: "_lowcore", new: "lowcore") */ static char *lc_struct; /* * Read a unsigned long value from address */ static unsigned long readmem_ul(unsigned long addr) { unsigned long rc; readmem(addr, KVADDR, &rc, sizeof(rc), "readmem_ul", FAULT_ON_ERROR); return rc; } /* * Print hex data */ static void print_hex_buf(void *buf, int len, int cols, char *tag) { int j, first = 
1; for (j = 0; j < len; j += 8) { if (j % (cols * 8) == 0) { if (first) first = 0; else fprintf(fp, "\n"); fprintf(fp, "%s", tag); } fprintf(fp, "%#018lx ", *((unsigned long *)(buf + j))); } if (len) fprintf(fp, "\n"); } /* * Initialize member offsets */ static void s390x_offsets_init(void) { if (STRUCT_EXISTS("lowcore")) lc_struct = "lowcore"; else lc_struct = "_lowcore"; if (MEMBER_EXISTS(lc_struct, "st_status_fixed_logout")) MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "st_status_fixed_logout"); else MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "psw_save_area"); if (!STRUCT_EXISTS("stack_frame")) { ASSIGN_OFFSET(s390_stack_frame_back_chain) = 0; ASSIGN_OFFSET(s390_stack_frame_r14) = 112; ASSIGN_SIZE(s390_stack_frame) = 160; } else { ASSIGN_OFFSET(s390_stack_frame_back_chain) = MEMBER_OFFSET("stack_frame", "back_chain"); ASSIGN_OFFSET(s390_stack_frame_r14) = MEMBER_OFFSET("stack_frame", "gprs") + 8 * 8; ASSIGN_SIZE(s390_stack_frame) = STRUCT_SIZE("stack_frame"); } } /* * MAX_PHYSMEM_BITS is 42 on older kernels, and 46 on newer kernels. 
*/
/*
 * Determine max_physmem_bits from the dimensions of the kernel's
 * mem_section array, trying the old (42-bit) then new (46-bit) values
 * against both the SPARSEMEM and SPARSEMEM_EXTREME array layouts.
 * Returns TRUE when a value is determined, FALSE otherwise.
 */
static int
set_s390x_max_physmem_bits(void)
{
	int array_len, dimension;

	machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD;

	/* No mem_section array means the old default applies. */
	if (!kernel_symbol_exists("mem_section"))
		return TRUE;

	if (!(array_len = get_array_length("mem_section", &dimension, 0)))
		return FALSE;

	/*
	 * !CONFIG_SPARSEMEM_EXTREME
	 */
	if (dimension) {
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD;
		if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT()))
			return TRUE;
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_NEW;
		if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT()))
			return TRUE;
		return FALSE;
	}

	/*
	 * CONFIG_SPARSEMEM_EXTREME
	 */
	machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD;
	if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME()))
		return TRUE;
	machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_NEW;
	if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME()))
		return TRUE;
	return FALSE;
}

/* Per-cpu register sets collected from the dumpfile's ELF notes. */
static struct s390x_cpu *s390x_cpu_vec;
static int s390x_cpu_cnt;

/*
 * Return s390x CPU data for backtrace
 *
 * Matches the task's cpu to a collected register set by comparing the
 * cpu's lowcore prefix address; fatal error if no entry matches.
 */
static struct s390x_cpu *s390x_cpu_get(struct bt_info *bt)
{
	unsigned int cpu = bt->tc->processor;
	unsigned long lowcore_ptr, prefix;
	unsigned int i;

	lowcore_ptr = symbol_value("lowcore_ptr");
	readmem(lowcore_ptr + cpu * sizeof(long), KVADDR,
		&prefix, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR);
	for (i = 0; i < s390x_cpu_cnt; i++) {
		if (s390x_cpu_vec[i].prefix == prefix)
			return &s390x_cpu_vec[i];
	}
	error(FATAL, "cannot determine CPU for task: %lx\n", bt->task);
	return NULL;
}

/*
 * ELF core dump functions for storing CPU data
 *
 * Each helper copies one ELF note's payload into the corresponding
 * fields of a struct s390x_cpu.
 */
static void s390x_elf_nt_prstatus_add(struct s390x_cpu *cpu,
				      struct s390x_nt_prstatus *prstatus)
{
	memcpy(&cpu->psw, &prstatus->psw, sizeof(cpu->psw));
	memcpy(&cpu->gprs, &prstatus->gprs, sizeof(cpu->gprs));
	memcpy(&cpu->acrs, &prstatus->acrs, sizeof(cpu->acrs));
}

static void s390x_elf_nt_fpregset_add(struct s390x_cpu *cpu,
				      struct s390x_nt_fpregset *fpregset)
{
	memcpy(&cpu->fpc, &fpregset->fpc, sizeof(cpu->fpc));
	memcpy(&cpu->fprs, &fpregset->fprs, sizeof(cpu->fprs));
}

static void s390x_elf_nt_timer_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->timer, desc, sizeof(cpu->timer));
}

static void s390x_elf_nt_todcmp_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->todcmp, desc, sizeof(cpu->todcmp));
}

static void s390x_elf_nt_todpreg_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->todpreg, desc, sizeof(cpu->todpreg));
}

static void s390x_elf_nt_ctrs_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->ctrs, desc, sizeof(cpu->ctrs));
}

static void s390x_elf_nt_prefix_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->prefix, desc, sizeof(cpu->prefix));
}

static void s390x_elf_nt_vxrs_low_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->vxrs_low, desc, sizeof(cpu->vxrs_low));
}

static void s390x_elf_nt_vxrs_high_add(struct s390x_cpu *cpu, void *desc)
{
	memcpy(&cpu->vxrs_high, desc, sizeof(cpu->vxrs_high));
}

/*
 * Return a pointer to an ELF note's descriptor data, which follows the
 * Elf64_Nhdr and the 4-byte-aligned name field.
 */
static void *get_elf_note_desc(Elf64_Nhdr *note)
{
	void *ptr = note;

	return ptr + roundup(sizeof(*note) + note->n_namesz, 4);
}

/*
 * Accumulate one ELF note into the s390x_cpu_vec entry for the given
 * cpu number, growing the vector when a new cpu is first seen.
 */
static void s390x_elf_note_add(int elf_cpu_nr, void *note_ptr)
{
	Elf64_Nhdr *note = note_ptr;
	struct s390x_cpu *cpu;
	void *desc;

	desc = get_elf_note_desc(note);
	/* A new cpu number starts a new vector entry. */
	if (elf_cpu_nr != s390x_cpu_cnt) {
		s390x_cpu_cnt++;
		s390x_cpu_vec = realloc(s390x_cpu_vec,
					s390x_cpu_cnt * sizeof(*s390x_cpu_vec));
		if (!s390x_cpu_vec)
			error(FATAL, "cannot malloc cpu space.");
	}
	cpu = &s390x_cpu_vec[s390x_cpu_cnt - 1];
	/* Dispatch on the note type to fill in the matching cpu fields. */
	switch (note->n_type) {
	case NT_PRSTATUS:
		s390x_elf_nt_prstatus_add(cpu, desc);
		break;
	case NT_FPREGSET:
		s390x_elf_nt_fpregset_add(cpu, desc);
		break;
	case NT_S390_TIMER:
		s390x_elf_nt_timer_add(cpu, desc);
		break;
	case NT_S390_TODCMP:
		s390x_elf_nt_todcmp_add(cpu, desc);
		break;
	case NT_S390_TODPREG:
		s390x_elf_nt_todpreg_add(cpu, desc);
		break;
	case NT_S390_CTRS:
		s390x_elf_nt_ctrs_add(cpu, desc);
		break;
	case NT_S390_PREFIX:
		s390x_elf_nt_prefix_add(cpu, desc);
		break;
	case NT_S390_VXRS_LOW:
		s390x_elf_nt_vxrs_low_add(cpu, desc);
		break;
	case NT_S390_VXRS_HIGH:
		s390x_elf_nt_vxrs_high_add(cpu, desc);
		break;
	}
}

/*
 * Walk the ELF note section, handing every note to machdep->dumpfile_init()
 * (s390x_elf_note_add) along with the running 1-based count of NT_PRSTATUS
 * notes seen so far — the count change is what triggers allocation of a new
 * per-CPU slot.
 */
static void s390x_process_elf_notes(void *note_ptr, unsigned long size_note)
{
	Elf64_Nhdr *note = NULL;
	size_t tot, len;
	static int num_prstatus_notes = 0;

	for (tot = 0; tot < size_note; tot += len) {
		note = note_ptr + tot;

		if (note->n_type == NT_PRSTATUS)
			num_prstatus_notes++;

		machdep->dumpfile_init(num_prstatus_notes, note);

		/* Name and descriptor are each padded to 4 bytes. */
		len = sizeof(Elf64_Nhdr);
		len = roundup(len + note->n_namesz, 4);
		len = roundup(len + note->n_descsz, 4);
	}
}

/*
 * Detect a zfcpdump-style live dump by checking for the "LIVEDUMP" magic
 * (0x4c49564544554d50 = ASCII "LIVEDUMP") at physical/kernel address 0.
 */
static void s390x_check_live(void)
{
	unsigned long long live_magic;

	readmem(0, KVADDR, &live_magic, sizeof(live_magic), "live_magic",
		RETURN_ON_ERROR | QUIET);

	if (live_magic == 0x4c49564544554d50ULL)
		pc->flags2 |= LIVE_DUMP;
}

/*
 * Do all necessary machine-specific setup here.  This is called several
 * times during initialization.
 */
void
s390x_init(int when)
{
	switch (when) {
	case SETUP_ENV:
		machdep->dumpfile_init = s390x_elf_note_add;
		machdep->process_elf_notes = s390x_process_elf_notes;
		break;
	case PRE_SYMTAB:
		machdep->verify_symbol = s390x_verify_symbol;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		// machdep->stacksize = KERNEL_STACK_SIZE;
		if ((machdep->pgd = (char *)malloc(SEGMENT_TABLE_SIZE)) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		/* pgd buffer doubles as the pmd cache on s390x */
		machdep->pmd = machdep->pgd;
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = generic_verify_paddr;
		machdep->get_kvaddr_ranges = s390x_get_kvaddr_ranges;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		break;
	case PRE_GDB:
		/* s390x kernel virtual == physical: identity map from 0 */
		machdep->kvbase = 0;
		machdep->identity_map_base = 0;
		machdep->is_kvaddr = generic_is_kvaddr;
		machdep->is_uvaddr = s390x_is_uvaddr;
		machdep->eframe_search = s390x_eframe_search;
		machdep->back_trace = s390x_back_trace_cmd;
		machdep->processor_speed = s390x_processor_speed;
		machdep->uvtop = s390x_uvtop;
		machdep->kvtop = s390x_kvtop;
		machdep->get_task_pgd = s390x_get_task_pgd;
		machdep->get_stack_frame = s390x_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = s390x_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->is_task_addr = s390x_is_task_addr;
		machdep->dis_filter = s390x_dis_filter;
		machdep->cmd_mach = s390x_cmd_mach;
		machdep->get_smp_cpus = s390x_get_smp_cpus;
		machdep->line_number_hooks = s390x_line_number_hooks;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = NULL;
		vt->flags |= COMMON_VADDR;
		s390x_check_live();
		break;
	case POST_GDB:
		if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
					  "irq_desc", NULL, 0);
		else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
					&machdep->nr_irqs);
		else
			machdep->nr_irqs = 0;
		machdep->vmalloc_start = s390x_vmalloc_start;
		machdep->dump_irq = generic_dump_irq;
		if (!machdep->hz)
			machdep->hz = HZ;
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		if (!set_s390x_max_physmem_bits())
			error(WARNING, "cannot determine MAX_PHYSMEM_BITS\n");
		s390x_offsets_init();
		break;
	case POST_INIT:
		break;
	}
}

/*
 * Dump machine dependent information
 */
void
s390x_dump_machdep_table(ulong arg)
{
	int others;

	others = 0;
	fprintf(fp, " flags: %lx (", machdep->flags);
	if (machdep->flags & KSYMS_START)
		fprintf(fp, "%sKSYMS_START", others++ ?
"|" : "");
	fprintf(fp, ")\n");
	fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
	/*
	 * BUG FIX: this row previously printed machdep->kvbase under the
	 * identity_map_base label (copy-paste error).  Both are set to 0 in
	 * s390x_init(PRE_GDB), so output was coincidentally identical, but
	 * the wrong field was displayed.
	 */
	fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base);
	fprintf(fp, " pagesize: %d\n", machdep->pagesize);
	fprintf(fp, " pageshift: %d\n", machdep->pageshift);
	fprintf(fp, " pagemask: %llx\n", machdep->pagemask);
	fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset);
	fprintf(fp, " stacksize: %ld\n", machdep->stacksize);
	fprintf(fp, " hz: %d\n", machdep->hz);
	fprintf(fp, " mhz: %ld\n", machdep->mhz);
	fprintf(fp, " memsize: %lld (0x%llx)\n",
		(unsigned long long)machdep->memsize,
		(unsigned long long)machdep->memsize);
	fprintf(fp, " bits: %d\n", machdep->bits);
	fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs);
	fprintf(fp, " eframe_search: s390x_eframe_search()\n");
	fprintf(fp, " back_trace: s390x_back_trace_cmd()\n");
	fprintf(fp, " processor_speed: s390x_processor_speed()\n");
	fprintf(fp, " uvtop: s390x_uvtop()\n");
	fprintf(fp, " kvtop: s390x_kvtop()\n");
	fprintf(fp, " get_task_pgd: s390x_get_task_pgd()\n");
	fprintf(fp, " dump_irq: generic_dump_irq()\n");
	fprintf(fp, " get_stack_frame: s390x_get_stack_frame()\n");
	fprintf(fp, " get_stackbase: generic_get_stackbase()\n");
	fprintf(fp, " get_stacktop: generic_get_stacktop()\n");
	fprintf(fp, " translate_pte: s390x_translate_pte()\n");
	fprintf(fp, " memory_size: generic_memory_size()\n");
	fprintf(fp, " vmalloc_start: s390x_vmalloc_start()\n");
	fprintf(fp, " is_task_addr: s390x_is_task_addr()\n");
	fprintf(fp, " verify_symbol: s390x_verify_symbol()\n");
	fprintf(fp, " dis_filter: s390x_dis_filter()\n");
	fprintf(fp, " cmd_mach: s390x_cmd_mach()\n");
	fprintf(fp, " get_smp_cpus: s390x_get_smp_cpus()\n");
	fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n");
	fprintf(fp, " is_uvaddr: s390x_is_uvaddr()\n");
	fprintf(fp, " verify_paddr: generic_verify_paddr()\n");
	fprintf(fp, " get_kvaddr_ranges: s390x_get_kvaddr_ranges()\n");
	fprintf(fp, " init_kernel_pgd: NULL\n");
	fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n");
	fprintf(fp, " dumpfile_init: s390x_elf_note_add()\n");
	fprintf(fp, " process_elf_notes: s390x_process_elf_notes()\n");
	fprintf(fp, " line_number_hooks: s390x_line_number_hooks\n");
	fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read);
	fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read);
	fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read);
	fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd);
	fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd);
	fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl);
	fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
	fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits);
	fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits);
	fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec);
}

/*
 * Check if address is in context's address space
 */
static int
s390x_is_uvaddr(ulong vaddr, struct task_context *tc)
{
	return IN_TASK_VMA(tc->task, vaddr);
}

/*
 * Translates a user virtual address to its physical address
 */
static int
s390x_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr,
	    int verbose)
{
	unsigned long pgd_base;

	readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR,
		&pgd_base, sizeof(long), "pgd_base", FAULT_ON_ERROR);
	return s390x_vtop(pgd_base, vaddr, paddr, verbose);
}

/*
 * Translates a kernel virtual address to its physical address
 */
static int
s390x_kvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr,
	    int verbose)
{
	unsigned long pgd_base;

	if (!IS_KVADDR(vaddr)) {
		*paddr = 0;
		return FALSE;
	}

	/* Before vmalloc_start is known, everything is identity-mapped. */
	if (!vt->vmalloc_start) {
		*paddr = VTOP(vaddr);
		return TRUE;
	}

	if (!IS_VMALLOC_ADDR(vaddr)) {
		*paddr = VTOP(vaddr);
		return TRUE;
	}

	/* vmalloc address: walk the kernel page tables. */
	pgd_base = (unsigned long)vt->kernel_pgd[0];
	return s390x_vtop(pgd_base, vaddr, paddr, verbose);
}

/*
 * Check if page is mapped
 */
static inline int s390x_pte_present(unsigned long x)
{
	if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
		return !((x) & S390X_PAGE_INVALID) ||
			((x) & S390X_PAGE_INVALID_MASK) ==
			S390X_PAGE_INVALID_NONE;
	} else {
		return ((x) &
S390X_PAGE_PRESENT);
	}
}

/*
 * page table traversal functions
 */

/* Region or segment table traversal function */
static ulong _kl_rsg_table_deref_s390x(ulong vaddr, ulong table,
				       int len, int level)
{
	ulong offset, entry;

	/* 11 index bits per level; level 0 indexes bits 20-30 (segment). */
	offset = ((vaddr >> (11*level + 20)) & 0x7ffULL) * 8;
	if (offset >= (len + 1)*4096)
		/* Offset is over the table limit. */
		return 0;
	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
		FAULT_ON_ERROR);
	/*
	 * Check if the segment table entry could be read and doesn't have
	 * any of the reserved bits set.
	 */
	if ((entry & 0xcULL) != (level << 2))
		return 0;
	/* Check if the region table entry has the invalid bit set. */
	if (entry & 0x20ULL)
		return 0;
	/* Region table entry is valid and well formed. */
	return entry;
}

/*
 * Check for swap entry — the encoding of software swap PTEs changed over
 * kernel versions, hence the version-dependent bit patterns.
 */
static int swap_entry(ulong entry)
{
	if (THIS_KERNEL_VERSION < LINUX(2,6,19)) {
		if ((entry & 0x601ULL) == 0x600ULL)
			return 1;
	}
	if (THIS_KERNEL_VERSION < LINUX(3,12,0)) {
		if ((entry & 0x403ULL) == 0x403ULL)
			return 1;
	} else {
		if ((entry & 0x603ULL) == 0x402ULL)
			return 1;
	}
	return 0;
}

/* Page table traversal function */
static ulong _kl_pg_table_deref_s390x(ulong vaddr, ulong table)
{
	ulong offset, entry;

	/* 8 index bits (bits 12-19), 8 bytes per entry. */
	offset = ((vaddr >> 12) & 0xffULL) * 8;
	readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry",
		FAULT_ON_ERROR);
	/*
	 * Return zero if the page table entry has the reserved (0x800) or
	 * the invalid (0x400) bit set and it is not a swap entry.
	 */
	if ((entry & 0xc00ULL) && !swap_entry(entry))
		return 0;
	/* Page table entry is valid and well formed. */
	return entry;
}

/* lookup virtual address in page tables */
int s390x_vtop(ulong table, ulong vaddr, physaddr_t *phys_addr, int verbose)
{
	ulong entry, paddr;
	int level, len;

	*phys_addr = 0;
	/*
	 * Walk the region and segment tables.
	 * We assume that the table length field in the asce is set to the
	 * maximum value of 3 (which translates to a region first, region
	 * second, region third or segment table with 2048 entries) and that
	 * the addressing mode is 64 bit.
	 */
	len = 3;
	/* Read the first entry to find the number of page table levels. */
	readmem(table, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR);
	level = (entry & 0xcULL) >> 2;
	if ((vaddr >> (31 + 11*level)) != 0ULL) {
		/* Address too big for the number of page table levels. */
		return FALSE;
	}
	while (level >= 0) {
		entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level);
		if (!entry)
			return FALSE;
		table = entry & ~0xfffULL;
		/* Check if this a 2GB page */
		if ((entry & 0x400ULL) && (level == 1)) {
			/* Add the 2GB frame offset & return the final value. */
			table &= ~0x7fffffffULL;
			*phys_addr = table + (vaddr & 0x7fffffffULL);
			return TRUE;
		}
		len = entry & 0x3ULL;
		level--;
	}

	/* Check if this is a large page. */
	if (entry & 0x400ULL) {
		/* Add the 1MB page offset and return the final value. */
		table &= ~0xfffffULL;
		*phys_addr = table + (vaddr & 0xfffffULL);
		return TRUE;
	}

	/* Get the page table entry */
	entry = _kl_pg_table_deref_s390x(vaddr, entry & ~0x7ffULL);
	if (!entry)
		return FALSE;

	/* For swap entries we have to return FALSE and phys_addr = PTE */
	if (swap_entry(entry)) {
		*phys_addr = entry;
		return FALSE;
	}

	/* Isolate the page origin from the page table entry. */
	paddr = entry & ~0xfffULL;

	/* Add the page offset and return the final value. */
	*phys_addr = paddr + (vaddr & 0xfffULL);

	return TRUE;
}

/*
 * Determine where vmalloc'd memory starts.
*/
static ulong
s390x_vmalloc_start(void)
{
	unsigned long sym_addr, vmalloc_base;

	/*
	 * Read the kernel's high_memory variable; vmalloc space begins
	 * there.  PHYSADDR works here because s390x identity-maps kernel
	 * virtual addresses from 0.
	 */
	sym_addr = symbol_value("high_memory");
	readmem(sym_addr, PHYSADDR, &vmalloc_base, sizeof(long),
		"highmem", FAULT_ON_ERROR);
	return vmalloc_base;
}

/*
 * Check if address can be a valid task_struct
 */
static int
s390x_is_task_addr(ulong task)
{
	if (!IS_KVADDR(task))
		return FALSE;

	/* With slab-allocated thread_info, any kernel address qualifies. */
	if (tt->flags & THREAD_INFO)
		return TRUE;

	/* Otherwise the task_struct sits at the base of its stack union. */
	return ALIGNED_STACK_OFFSET(task) == 0;
}

/*
 * return MHz - unfortunately it is not possible to get this on linux
 * for zSeries
 */
static ulong
s390x_processor_speed(void)
{
	return 0;
}

/*
 * Accept or reject a symbol from the kernel namelist.
 */
static int
s390x_verify_symbol(const char *name, ulong value, char type)
{
	if (CRASHDEBUG(8) && name && strlen(name))
		fprintf(fp, "%08lx %s\n", value, name);

	/* Symbols before the kernel entry points are ignored. */
	if (STREQ(name, "startup") || STREQ(name, "_stext"))
		machdep->flags |= KSYMS_START;

	if (!name || !strlen(name) || !(machdep->flags & KSYMS_START))
		return FALSE;

	if ((type == 'A') && STRNEQ(name, "__crc_"))
		return FALSE;

	if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled."))
		return FALSE;

	/* reject L2^B symbols */
	if (strstr(name, "L2\002") == name)
		return FALSE;

	/* throw away all symbols containing a '.' */
	if (strchr(name, '.'))
		return FALSE;

	return TRUE;
}

/*
 * Get the relevant page directory pointer from a task structure.
 */
static ulong
s390x_get_task_pgd(ulong task)
{
	return (error(FATAL, "s390x_get_task_pgd: TBD\n"));
}

/*
 * Translate a PTE, returning TRUE if the page is present.
 * If a physaddr pointer is passed in, don't print anything.
 */
static int
s390x_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	int c, len1, len2, len3;

	if (S390X_PTE_INVALID(pte)) {
		fprintf(fp, "PTE is invalid\n");
		return FALSE;
	}

	/* Report the page frame base even for non-present (swap) PTEs. */
	if (physaddr)
		*((ulong *)physaddr) = pte & S390X_PAGE_BASE_MASK;

	if (!s390x_pte_present(pte)) {
		/* Not present: decode and display the swap location. */
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		sprintf(ptebuf, "%lx", pte);
		len1 = MAX(strlen(ptebuf), strlen("PTE"));
		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|LJUST, "PTE"),
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		sprintf(ptebuf, "%lx", pte);
		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));
		return FALSE;
	}

	/* Present page: show physical base and protection flags. */
	fprintf(fp, "PTE PHYSICAL FLAGS\n");
	fprintf(fp, "%08lx %08llx", pte, pte & S390X_PAGE_BASE_MASK);
	fprintf(fp, " (");
	if (pte & S390X_PAGE_INVALID)
		fprintf(fp, "INVALID ");
	if (pte & S390X_PAGE_RO)
		fprintf(fp, "PROTECTION");
	fprintf(fp, ")");

	return TRUE;
}

/*
 * Look for likely exception frames in a stack.
 */
static int s390x_eframe_search(struct bt_info *bt)
{
	/* Neither -e nor -E is supported on s390x; both are fatal errors. */
	if (bt->flags & BT_EFRAME_SEARCH2)
		return (error(FATAL,
			"Option '-E' is not implemented for this architecture\n"));
	else
		return (error(FATAL,
			"Option '-e' is not implemented for this architecture\n"));
}

#ifdef DEPRECATED
/*
 * returns cpu number of task
 */
static int
s390x_cpu_of_task(unsigned long task)
{
	unsigned int cpu;

	if (VALID_MEMBER(task_struct_processor)) {
		/* linux 2.4 */
		readmem(task + OFFSET(task_struct_processor), KVADDR,
			&cpu, sizeof(cpu), "task_struct_processor",
			FAULT_ON_ERROR);
	} else {
		/* linux 2.6 */
		char thread_info[8192];
		unsigned long thread_info_addr;

		readmem(task + OFFSET(task_struct_thread_info), KVADDR,
			&thread_info_addr, sizeof(thread_info_addr),
			"thread info addr", FAULT_ON_ERROR);
		readmem(thread_info_addr, KVADDR, thread_info,
			sizeof(thread_info), "thread info", FAULT_ON_ERROR);
		cpu = *((int*) &thread_info[OFFSET(thread_info_cpu)]);
	}
	return cpu;
}
#endif

/*
 * returns true, if task of bt currently is executed by a cpu
 */
static int
s390x_has_cpu(struct bt_info *bt)
{
	int cpu = bt->tc->processor;

	if (is_task_active(bt->task) && (kt->cpu_flags[cpu] & ONLINE_MAP))
		return TRUE;
	else
		return FALSE;
}

/*
 * read lowcore for cpu
 *
 * Reads the CPU's prefix (lowcore) page and, when ELF note data is
 * available (s390x_cpu_vec), overlays the registers saved in the notes at
 * their defined lowcore save-area offsets.
 */
static void
s390x_get_lowcore(struct bt_info *bt, char* lowcore)
{
	unsigned long lowcore_array, lowcore_ptr;
	struct s390x_cpu *s390x_cpu;
	int cpu = bt->tc->processor;

	lowcore_array = symbol_value("lowcore_ptr");
	readmem(lowcore_array + cpu * S390X_WORD_SIZE, KVADDR,
		&lowcore_ptr, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR);
	readmem(lowcore_ptr, KVADDR, lowcore, LOWCORE_SIZE, "lowcore",
		FAULT_ON_ERROR);
	if (!s390x_cpu_vec)
		return;

	/*
	 * Copy register information to defined places in lowcore.
	 * NOTE(review): the numeric offsets (4864 = psw, 4736 = gprs, ...)
	 * presumably mirror the save-area layout in asm/lowcore.h — confirm
	 * against the kernel headers before changing.
	 */
	s390x_cpu = s390x_cpu_get(bt);
	memcpy(lowcore + 4864, &s390x_cpu->psw, sizeof(s390x_cpu->psw));
	memcpy(lowcore + 4736, &s390x_cpu->gprs, sizeof(s390x_cpu->gprs));
	memcpy(lowcore + 4928, &s390x_cpu->acrs, sizeof(s390x_cpu->acrs));

	memcpy(lowcore + 4892, &s390x_cpu->fpc, sizeof(s390x_cpu->fpc));
	memcpy(lowcore + 4608, &s390x_cpu->fprs, sizeof(s390x_cpu->fprs));

	memcpy(lowcore + 4888, &s390x_cpu->prefix, sizeof(s390x_cpu->prefix));
	memcpy(lowcore + 4992, &s390x_cpu->ctrs, sizeof(s390x_cpu->ctrs));

	memcpy(lowcore + 4900, &s390x_cpu->todpreg, sizeof(s390x_cpu->todpreg));
	memcpy(lowcore + 4904, &s390x_cpu->timer, sizeof(s390x_cpu->timer));
	memcpy(lowcore + 4912, &s390x_cpu->todcmp, sizeof(s390x_cpu->todcmp));
}

/*
 * Copy VX registers out of s390x cpu
 *
 * Layout: 16 x 16-byte vector registers (fpr + vxrs_low halves) followed
 * by the 16 high vector registers.
 */
static void vx_copy(void *buf, struct s390x_cpu *s390x_cpu)
{
	char *_buf = buf;
	int i;

	for (i = 0; i < 16; i++) {
		memcpy(&_buf[i * 16], &s390x_cpu->fprs[i], 8);
		memcpy(&_buf[i * 16 + 8], &s390x_cpu->vxrs_low[i], 8);
	}
	memcpy(&_buf[16 * 16], &s390x_cpu->vxrs_high[0], 16 * 16);
}

/*
 * Check if VX registers are available
 *
 * The lowcore word at 0x11b0 holds the vector save-area pointer; it must
 * be non-zero and 1024-byte aligned to be valid.
 */
static int has_vx_regs(char *lowcore)
{
	unsigned long addr = *((uint64_t *)(lowcore + 0x11b0));

	if (addr == 0 || addr % 1024)
		return 0;
	return 1;
}

/*
 * Print vector registers for cpu
 */
static void s390x_print_vx_sa(struct bt_info *bt, char *lc)
{
	char vx_sa[VX_SA_SIZE];
	uint64_t addr;

	if (!(bt->flags & BT_SHOW_ALL_REGS))
		return;
	if (!has_vx_regs(lc))
		return;
	if (!s390x_cpu_vec) {
		/* Pointer to save area */
		addr = *((uint64_t *)(lc + 0x11b0));
		readmem(addr, KVADDR, vx_sa, sizeof(vx_sa), "vx_sa",
			FAULT_ON_ERROR);
	} else {
		/* Get data from s390x cpu */
		vx_copy(vx_sa, s390x_cpu_get(bt));
	}
	fprintf(fp, " -vector registers:\n");
	print_hex_buf(vx_sa, sizeof(vx_sa), 2, " ");
}

/*
 * Get stack address for interrupt stack using the pcpu array
 */
static unsigned long get_int_stack_pcpu(char *stack_name, int cpu)
{
	unsigned long addr;

	addr = symbol_value("pcpu_devices") +
		cpu * STRUCT_SIZE("pcpu") + MEMBER_OFFSET("pcpu", stack_name);
	/* Stored value is the stack base; return its top. */
	return readmem_ul(addr) + INT_STACK_SIZE;
}

/*
 * Get stack address for interrupt stack using the lowcore
 */
static unsigned long get_int_stack_lc(char *stack_name, char *lc)
{
	if (!MEMBER_EXISTS(lc_struct, stack_name))
		return 0;
	return
roundup(ULONG(lc + MEMBER_OFFSET(lc_struct, stack_name)), PAGESIZE());
}

/*
 * Read interrupt stack (either "async_stack" or "panic_stack");
 * On return *start/*end delimit the stack, or both are 0 when the stack
 * does not exist on this kernel.
 */
static void get_int_stack(char *stack_name, int cpu, char *lc,
			  unsigned long *start, unsigned long *end)
{
	unsigned long stack_addr;

	*start = *end = 0;

	if (strcmp(stack_name, "restart_stack") == 0) {
		stack_addr = symbol_value("restart_stack");
		stack_addr = readmem_ul(stack_addr);
	} else {
		if (symbol_exists("pcpu_devices") &&
		    MEMBER_EXISTS("pcpu", stack_name))
			stack_addr = get_int_stack_pcpu(stack_name, cpu);
		else
			stack_addr = get_int_stack_lc(stack_name, lc);
	}
	if (stack_addr == 0)
		return;
	*start = stack_addr - INT_STACK_SIZE;
	*end = stack_addr;
}

/*
 * Print hex data
 */
static void print_hex(unsigned long addr, int len, int cols)
{
	int j, first = 1;

	for (j = 0; j < len; j += 8) {
		if (j % (cols * 8) == 0) {
			if (!first)
				fprintf(fp, "\n");
			else
				first = 0;
			fprintf(fp, " %016lx: ", addr + j);
		}
		fprintf(fp, " %016lx", readmem_ul(addr + j));
	}
	if (len)
		fprintf(fp, "\n");
}

/*
 * Print hexdump of stack frame data
 */
static void print_frame_data(unsigned long sp, unsigned long high)
{
	unsigned long next_sp, len = high - sp;

	next_sp = readmem_ul(sp + MEMBER_OFFSET("stack_frame", "back_chain"));
	if (next_sp == 0)
		/* Last frame: dump stack frame plus the pt_regs above it. */
		len = MIN(len, SIZE(s390_stack_frame) + STRUCT_SIZE("pt_regs"));
	else
		len = MIN(len, next_sp - sp);
	print_hex(sp, len, 2);
}

/*
 * Do reference check and set flags
 *
 * Returns 1 when in "bt -R" reference mode (caller should suppress
 * printing); sets BT_REF_FOUND when addr matches the reference.
 */
static int bt_reference_check(struct bt_info *bt, unsigned long addr)
{
	if (!BT_REFERENCE_CHECK(bt))
		return 0;

	if (bt->ref->cmdflags & BT_REF_HEXVAL) {
		if (addr == bt->ref->hexval)
			bt->ref->cmdflags |= BT_REF_FOUND;
	} else {
		if (STREQ(closest_symbol(addr), bt->ref->str))
			bt->ref->cmdflags |= BT_REF_FOUND;
	}
	return 1;
}

/*
 * Print stack frame
 */
static void print_frame(struct bt_info *bt, int cnt, unsigned long sp,
			unsigned long r14)
{
	struct load_module *lm;
	char *sym;
	ulong offset;
	struct syment *symp;
	char *name_plus_offset;
	char buf[BUFSIZE];

	if (bt_reference_check(bt, r14))
		return;
	fprintf(fp, "%s#%d [%08lx] ", cnt < 10 ? " " : "", cnt, sp);
	sym = closest_symbol(r14);
	name_plus_offset = NULL;
	if (bt->flags & BT_SYMBOL_OFFSET) {
		symp = value_search(r14, &offset);
		if (symp && offset)
			name_plus_offset =
				value_to_symstr(r14, buf, bt->radix);
	}
	fprintf(fp, "%s at %lx",
		name_plus_offset ? name_plus_offset : sym, r14);
	if (module_symbol(r14, NULL, &lm, NULL, 0))
		fprintf(fp, " [%s]", lm->mod_name);
	fprintf(fp, "\n");
	if (bt->flags & BT_LINE_NUMBERS)
		s390x_dump_line_number(r14);
}

/*
 * Print pt_regs structure
 */
static void print_ptregs(struct bt_info *bt, unsigned long sp)
{
	unsigned long addr, psw_flags, psw_addr, offs;
	struct load_module *lm;
	char *sym;
	int i;

	addr = sp + MEMBER_OFFSET("pt_regs", "psw");
	psw_flags = readmem_ul(addr);
	psw_addr = readmem_ul(addr + sizeof(long));
	if (bt_reference_check(bt, psw_addr))
		return;
	fprintf(fp, " PSW: %016lx %016lx ", psw_flags, psw_addr);
	if (psw_flags & S390X_PSW_MASK_PSTATE) {
		fprintf(fp, "(user space)\n");
	} else {
		sym = closest_symbol(psw_addr);
		offs = psw_addr - closest_symbol_value(psw_addr);
		if (module_symbol(psw_addr, NULL, &lm, NULL, 0))
			fprintf(fp, "(%s+%ld [%s])\n", sym, offs,
				lm->mod_name);
		else
			fprintf(fp, "(%s+%ld)\n", sym, offs);
	}
	addr = sp + MEMBER_OFFSET("pt_regs", "gprs");
	for (i = 0; i < 16; i++) {
		if (i != 0 && i % 4 == 0)
			fprintf(fp, "\n");
		if (i % 4 == 0) {
			if (i == 0)
				fprintf(fp, " GPRS: ");
			else
				fprintf(fp, " ");
		}
		fprintf(fp, "%016lx ", readmem_ul(addr + i * sizeof(long)));
	}
	fprintf(fp, "\n");
}

/*
 * Print back trace for one stack
 */
static unsigned long show_trace(struct bt_info *bt, int cnt, unsigned long sp,
				unsigned long low, unsigned long high)
{
	unsigned long reg;
	unsigned long psw_addr ATTRIBUTE_UNUSED;

	while (1) {
		if (sp < low || sp > high - SIZE(s390_stack_frame))
			return sp;
		reg = readmem_ul(sp + OFFSET(s390_stack_frame_r14));
		/* Skip the top frame when the task is on a CPU (live PSW). */
		if (!s390x_has_cpu(bt))
			print_frame(bt, cnt++, sp, reg);
		if (bt->flags & BT_FULL)
			print_frame_data(sp, high);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = readmem_ul(sp +
					OFFSET(s390_stack_frame_back_chain));
			if (!sp) {
				sp = low;
				break;
			}
			if (sp <= low || sp > high - SIZE(s390_stack_frame))
				return sp;
			reg = readmem_ul(sp + OFFSET(s390_stack_frame_r14));
			print_frame(bt, cnt++, sp, reg);
			if (bt->flags & BT_FULL)
				print_frame_data(sp, high);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp += SIZE(s390_stack_frame);
		if (sp <= low || sp > high - STRUCT_SIZE("pt_regs"))
			return sp;
		/* Check for user PSW */
		reg = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "psw"));
		if (reg & S390X_PSW_MASK_PSTATE) {
			print_ptregs(bt, sp);
			return sp;
		}
		/* Get new backchain from r15 */
		reg = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "gprs") +
				 15 * sizeof(long));
		/* Get address of interrupted function */
		psw_addr = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "psw") +
				      sizeof(long));
		/* Check for loop (kernel_thread_starter) of second zero bc */
		if (low == reg || reg == 0)
			return reg;
		print_ptregs(bt, sp);
		low = sp;
		sp = reg;
		cnt = 0;
	}
}

/*
 * Unroll a kernel stack
 */
static void
s390x_back_trace_cmd(struct bt_info *bt)
{
	unsigned long low, high, sp = bt->stkptr;
	int cpu = bt->tc->processor, cnt = 0;
	char lowcore[LOWCORE_SIZE];
	unsigned long psw_flags;

	if (bt->hp && bt->hp->eip) {
		error(WARNING,
		      "instruction pointer argument ignored on this architecture!\n");
	}
	if (is_task_active(bt->task) && !(kt->cpu_flags[cpu] & ONLINE_MAP)) {
		fprintf(fp, " CPU offline\n");
		return;
	}
	/*
	 * Print lowcore and print interrupt stacks when task has cpu
	 */
	if (s390x_has_cpu(bt)) {
		s390x_get_lowcore(bt, lowcore);
		psw_flags = ULONG(lowcore +
				  OFFSET(s390_lowcore_psw_save_area));

		if (psw_flags & S390X_PSW_MASK_PSTATE) {
			/* Interrupted in user mode: no kernel stack to walk. */
			fprintf(fp, "Task runs in userspace\n");
			s390x_print_lowcore(lowcore, bt, 0);
			s390x_print_vx_sa(bt, lowcore);
			return;
		}
		s390x_print_lowcore(lowcore, bt, 1);
		s390x_print_vx_sa(bt, lowcore);
		fprintf(fp, "\n");
		/* Walk restart, panic and async stacks before the task stack. */
		if (symbol_exists("restart_stack")) {
			get_int_stack("restart_stack", cpu, lowcore,
				      &low, &high);
			sp = show_trace(bt, cnt, sp, low, high);
		}
		get_int_stack("panic_stack", cpu, lowcore, &low, &high);
		sp = show_trace(bt, cnt, sp, low, high);
		get_int_stack("async_stack", cpu, lowcore, &low, &high);
		sp = show_trace(bt, cnt, sp, low, high);
	}
	/*
	 * Print task stack
	 */
	if (THIS_KERNEL_VERSION >= LINUX(2, 6, 0)) {
		readmem(bt->task + OFFSET(task_struct_thread_info), KVADDR,
			&low, sizeof(long), "thread info", FAULT_ON_ERROR);
	} else {
		low = bt->task;
	}
	high = low + KERNEL_STACK_SIZE;
	sp = show_trace(bt, cnt, sp, low, high);
}

/*
 * print lowcore info (psw and all registers)
 *
 * In reference-check mode ("bt -R") only the PSW address is matched and
 * nothing is printed.
 */
static void
s390x_print_lowcore(char* lc, struct bt_info *bt, int show_symbols)
{
	char* ptr;
	unsigned long tmp[4];

	ptr = lc + OFFSET(s390_lowcore_psw_save_area);
	tmp[0] = ULONG(ptr);
	tmp[1] = ULONG(ptr + S390X_WORD_SIZE);

	if (BT_REFERENCE_CHECK(bt)) {
		if (bt->ref->cmdflags & BT_REF_HEXVAL) {
			if (tmp[1] == bt->ref->hexval)
				bt->ref->cmdflags |= BT_REF_FOUND;
		} else {
			if (STREQ(closest_symbol(tmp[1]), bt->ref->str))
				bt->ref->cmdflags |= BT_REF_FOUND;
		}
		return;
	}
	fprintf(fp, " LOWCORE INFO:\n");
	fprintf(fp, " -psw : %#018lx %#018lx\n", tmp[0], tmp[1]);
	if (show_symbols) {
		fprintf(fp, " -function : %s at %lx\n",
			closest_symbol(tmp[1]), tmp[1]);
		if (bt->flags & BT_LINE_NUMBERS)
			s390x_dump_line_number(tmp[1]);
	}
	ptr = lc + MEMBER_OFFSET(lc_struct, "prefixreg_save_area");
	tmp[0] = UINT(ptr);
	fprintf(fp, " -prefix : %#010lx\n", tmp[0]);

	ptr = lc + MEMBER_OFFSET(lc_struct, "cpu_timer_save_area");
	tmp[0] = ULONG(ptr);
	fprintf(fp, " -cpu timer: %#018lx\n", tmp[0]);

	ptr = lc + MEMBER_OFFSET(lc_struct, "clock_comp_save_area");
	/*
	 * Shift clock comparator by 8 because we got bit positions 0-55
	 * in byte 1 to 8. The first byte is always zero.
	 */
	tmp[0] = ULONG(ptr) << 8;
	fprintf(fp, " -clock cmp: %#018lx\n", tmp[0]);

	fprintf(fp, " -general registers:\n");
	ptr = lc + MEMBER_OFFSET(lc_struct, "gpregs_save_area");
	tmp[0] = ULONG(ptr);
	tmp[1] = ULONG(ptr + S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 2 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 3 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 4 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 5 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 6 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 7 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 8 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 9 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 10* S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 11* S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 12* S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 13* S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 14* S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 15* S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);

	/* Access registers are 32 bits wide, printed four per line. */
	fprintf(fp, " -access registers:\n");
	ptr = lc + MEMBER_OFFSET(lc_struct, "access_regs_save_area");
	tmp[0] = UINT(ptr);
	tmp[1] = UINT(ptr + 4);
	tmp[2] = UINT(ptr + 2 * 4);
	tmp[3] = UINT(ptr + 3 * 4);
	fprintf(fp, " %#010lx %#010lx %#010lx %#010lx\n",
		tmp[0], tmp[1], tmp[2], tmp[3]);
	tmp[0] = UINT(ptr + 4 * 4);
	tmp[1] = UINT(ptr + 5 * 4);
	tmp[2] = UINT(ptr + 6 * 4);
	tmp[3] = UINT(ptr + 7 * 4);
	fprintf(fp, " %#010lx %#010lx %#010lx %#010lx\n",
		tmp[0], tmp[1], tmp[2], tmp[3]);
	tmp[0] = UINT(ptr + 8 * 4);
	tmp[1] = UINT(ptr + 9 * 4);
	tmp[2] = UINT(ptr + 10 * 4);
	tmp[3] = UINT(ptr + 11 * 4);
	fprintf(fp, " %#010lx %#010lx %#010lx %#010lx\n",
		tmp[0], tmp[1], tmp[2], tmp[3]);
	tmp[0] = UINT(ptr + 12 * 4);
	tmp[1] = UINT(ptr + 13 * 4);
	tmp[2] = UINT(ptr + 14 * 4);
	tmp[3] = UINT(ptr + 15 * 4);
	fprintf(fp, " %#010lx %#010lx %#010lx %#010lx\n",
		tmp[0], tmp[1], tmp[2], tmp[3]);

	fprintf(fp, " -control registers:\n");
	ptr = lc + MEMBER_OFFSET(lc_struct, "cregs_save_area");
	tmp[0] = ULONG(ptr);
	tmp[1] = ULONG(ptr + S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 2 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 3 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 4 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 5 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 6 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 7 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 8 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 9 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 10 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 11 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 12 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 13 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 14 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 15 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);

	ptr = lc + MEMBER_OFFSET(lc_struct, "floating_pt_save_area");
	fprintf(fp, " -floating point registers:\n");
	tmp[0] = ULONG(ptr);
	tmp[1] = ULONG(ptr + S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 2 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 3 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 4 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 5 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 6 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 7 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 8 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 9 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 10 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 11 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
	tmp[0] = ULONG(ptr + 12 * S390X_WORD_SIZE);
	tmp[1] = ULONG(ptr + 13 * S390X_WORD_SIZE);
	tmp[2] = ULONG(ptr + 14 * S390X_WORD_SIZE);
	tmp[3] = ULONG(ptr + 15 * S390X_WORD_SIZE);
	fprintf(fp, " %#018lx %#018lx\n", tmp[0], tmp[1]);
	fprintf(fp, " %#018lx %#018lx\n", tmp[2], tmp[3]);
}

/*
 * Get a stack frame combination of pc and ra from the most relevent spot.
 */
static void
s390x_get_stack_frame(struct bt_info *bt, ulong *eip, ulong *esp)
{
	unsigned long ksp, r14;
	int r14_offset;
	char lowcore[LOWCORE_SIZE];

	if (s390x_has_cpu(bt))
		s390x_get_lowcore(bt, lowcore);

	/* get the stack pointer */
	if (esp) {
		if (!LIVE() && s390x_has_cpu(bt)) {
			/* r15 from the lowcore GPR save area */
			ksp = ULONG(lowcore +
				    MEMBER_OFFSET(lc_struct,
						  "gpregs_save_area") +
				    (15 * S390X_WORD_SIZE));
		} else {
			readmem(bt->task + OFFSET(task_struct_thread_ksp),
				KVADDR, &ksp, sizeof(void *),
				"thread_struct ksp", FAULT_ON_ERROR);
		}
		*esp = ksp;
	} else {
		/* for 'bt -S' */
		ksp = bt->hp->esp;
	}
	/* get the instruction address */
	if (!eip)
		return;

	if (s390x_has_cpu(bt) && esp) {
		*eip = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area) +
			     S390X_WORD_SIZE);
	} else {
		if (!STRUCT_EXISTS("stack_frame")) {
			/* legacy fixed r14 offset in the stack frame */
			r14_offset = 112;
		} else {
			r14_offset = MEMBER_OFFSET("stack_frame", "gprs") +
				     8 * S390X_WORD_SIZE;
		}
		readmem(ksp + r14_offset, KVADDR, &r14, sizeof(void*), "eip",
			FAULT_ON_ERROR);
		*eip = r14;
	}
}

/*
 * Filter disassembly output if the output radix is not gdb's default 10
 */
static int
s390x_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;
	/*
	 * For some reason gdb can go off into the weeds translating text addresses,
	 * so this routine both fixes the references as well as imposing the current
	 * output radix on the translations.
	 */
	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	/* Re-translate a trailing "<symbol>" reference in the new radix. */
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) &&
		       !(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x") ||
			 STRNEQ(p1, ",0x")))
			p1--;

		if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x") ||
		      STRNEQ(p1, ",0x")))
			return FALSE;
		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));

		sprintf(p1, "%s", buf1);
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
s390x_get_smp_cpus(void)
{
	return MAX(get_cpus_online(), get_highest_cpu_online()+1);
}

/*
 * Machine dependent command.
 */
void
s390x_cmd_mach(void)
{
	int c;

	while ((c = getopt(argcnt, args, "cm")) != EOF) {
		switch(c) {
		case 'c':
			fprintf(fp,
				"'-c' option is not implemented on this architecture\n");
			return;
		case 'm':
			fprintf(fp,
				"'-m' option is not implemented on this architecture\n");
			return;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	s390x_display_machine_stats();
}

/*
 * "mach" command output.
 */
*/ static void s390x_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static const char *hook_files[] = { "arch/s390x/kernel/entry.S", "arch/s390x/kernel/head.S" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook s390x_line_number_hooks[] = { {"startup",HEAD_S}, {"_stext",HEAD_S}, {"_pstart",HEAD_S}, {"system_call",ENTRY_S}, {"sysc_do_svc",ENTRY_S}, {"sysc_do_restart",ENTRY_S}, {"sysc_return",ENTRY_S}, {"sysc_sigpending",ENTRY_S}, {"sysc_restart",ENTRY_S}, {"sysc_singlestep",ENTRY_S}, {"sysc_tracesys",ENTRY_S}, {"ret_from_fork",ENTRY_S}, {"pgm_check_handler",ENTRY_S}, {"io_int_handler",ENTRY_S}, {"io_return",ENTRY_S}, {"ext_int_handler",ENTRY_S}, {"mcck_int_handler",ENTRY_S}, {"mcck_return",ENTRY_S}, {"restart_int_handler",ENTRY_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void s390x_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) { fprintf(fp, GDB_PATCHED() ? 
"" : " (cannot determine file and line number)\n"); } else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } static int s390x_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; physaddr_t phys1, phys2; ulong pp1, pp2; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->kvbase; vrp[cnt++].end = vt->high_memory; vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = first_vmalloc_address(); vrp[cnt++].end = last_vmalloc_address(); phys1 = (physaddr_t)(0); phys2 = (physaddr_t)VTOP(vt->high_memory - PAGESIZE()); if (phys_to_page(phys1, &pp1) && phys_to_page(phys2, &pp2) && (pp1 >= vrp[cnt-1].end)) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = pp1; vrp[cnt++].end = pp2; } return cnt; } #endif crash-7.1.4/qemu-load.c0000664000000000000000000006253512634305150013365 0ustar rootroot/* * Qemu save VM loader * * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. * Written by Paolo Bonzini. * * Portions Copyright (C) 2009 David Anderson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #define _GNU_SOURCE #include "qemu-load.h" #include #include #include #include #include "kvmdump.h" struct qemu_device * device_alloc (struct qemu_device_list *dl, size_t sz, struct qemu_device_vtbl *vtbl, uint32_t section_id, uint32_t instance_id, uint32_t version_id) { struct qemu_device *d = calloc (1, sz); d->vtbl = vtbl; d->list = dl; d->section_id = section_id; d->instance_id = instance_id; d->version_id = version_id; if (!dl->head) dl->head = dl->tail = d; else { dl->tail->next = d; d->prev = dl->tail; dl->tail = d; } return d; } struct qemu_device * device_find (struct qemu_device_list *dl, uint32_t section_id) { struct qemu_device *d; d = dl->head; while (d && d->section_id != section_id) d = d->next; return d; } struct qemu_device * device_find_instance (struct qemu_device_list *dl, const char *name, uint32_t instance_id) { struct qemu_device *d; d = dl->head; while (d && (strcmp (d->vtbl->name, name) || d->instance_id != instance_id)) d = d->next; return d; } void device_free (struct qemu_device *d) { struct qemu_device_list *dl = d->list; if (d->prev) d->prev->next = d->next; else dl->head = d->next; if (d->next) d->next->prev = d->prev; else dl->tail = d->prev; d->prev = d->next = NULL; if (d->vtbl->free) d->vtbl->free (d, dl); } void device_list_free (struct qemu_device_list *l) { if (!l) return; while (l->head) device_free (l->head); } /* File access. 
*/ static inline uint16_t get_be16 (FILE *fp) { uint8_t a = getc (fp); uint8_t b = getc (fp); return (a << 8) | b; } static inline uint16_t get_le16 (FILE *fp) { uint8_t b = getc (fp); uint8_t a = getc (fp); return (a << 8) | b; } static inline uint32_t get_be32 (FILE *fp) { uint16_t a = get_be16 (fp); uint16_t b = get_be16 (fp); return (a << 16) | b; } static inline uint32_t get_le32 (FILE *fp) { uint16_t b = get_le16 (fp); uint16_t a = get_le16 (fp); return (a << 16) | b; } static inline uint64_t get_be64 (FILE *fp) { uint32_t a = get_be32 (fp); uint32_t b = get_be32 (fp); return ((uint64_t)a << 32) | b; } static inline uint64_t get_le64 (FILE *fp) { uint32_t b = get_le32 (fp); uint32_t a = get_le32 (fp); return ((uint64_t)a << 32) | b; } static inline void get_qemu128 (FILE *fp, union qemu_uint128_t *result) { result->i[1] = get_le32 (fp); result->i[0] = get_le32 (fp); result->i[3] = get_le32 (fp); result->i[2] = get_le32 (fp); } /* RAM loader. */ #define RAM_SAVE_FLAG_FULL 0x01 #define RAM_SAVE_FLAG_COMPRESS 0x02 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 #define RAM_SAVE_FLAG_PAGE 0x08 #define RAM_SAVE_FLAG_EOS 0x10 #define RAM_SAVE_FLAG_CONTINUE 0x20 #define RAM_SAVE_ADDR_MASK (~4095LL) #define RAM_OFFSET_COMPRESSED (~(off_t)255) static void ram_alloc (struct qemu_device_ram *dram, uint64_t size) { // size_t old_npages = dram->offsets ? 
0 : dram->last_ram_offset / 4096; // size_t new_npages = size / 4096; // assert (size <= SIZE_MAX); // if (dram->offsets) // dram->offsets = realloc (dram->offsets, // new_npages * sizeof (off_t)); // else // dram->offsets = malloc (new_npages * sizeof (off_t)); // // assert (dram->offsets); // while (old_npages < new_npages) // dram->offsets[old_npages++] = RAM_OFFSET_COMPRESSED | 0; dram->last_ram_offset = size; } #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif static int get_string (FILE *fp, char *name) { size_t items ATTRIBUTE_UNUSED; int sz = (uint8_t) getc (fp); if (sz == EOF) return -1; items = fread (name, sz, 1, fp); name[sz] = 0; return sz; } static void ram_read_blocks (FILE *fp, uint64_t size) { char name[257]; /* The RAM block table is a list of block names followed by their sizes. Read it until the sizes sum up to SIZE bytes. */ while (size) { get_string (fp, name); size -= get_be64 (fp); } } static uint32_t ram_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { char name[257]; struct qemu_device_ram *dram = (struct qemu_device_ram *)d; uint64_t header; static int pc_ram = 0; for (;;) { uint64_t addr; off_t entry; header = get_be64 (fp); if (feof (fp) || ferror (fp)) return 0; if (header & RAM_SAVE_FLAG_EOS) break; assert (!(header & RAM_SAVE_FLAG_FULL)); addr = header & RAM_SAVE_ADDR_MASK; if (header & RAM_SAVE_FLAG_MEM_SIZE) { ram_alloc (dram, addr); if (d->version_id >= 4) ram_read_blocks(fp, addr); continue; } if (d->version_id >= 4 && !(header & RAM_SAVE_FLAG_CONTINUE)) { get_string(fp, name); if (strcmp(name, "pc.ram") == 0) pc_ram = 1; else pc_ram = 0; } if (header & RAM_SAVE_FLAG_COMPRESS) { entry = RAM_OFFSET_COMPRESSED | getc(fp); if ((d->version_id == 3) || (d->version_id >= 4 && pc_ram)) store_mapfile_offset(addr, &entry); } else if (header & RAM_SAVE_FLAG_PAGE) { entry = ftell(fp); if ((d->version_id == 3) || (d->version_id >= 4 && pc_ram)) store_mapfile_offset(addr, &entry); fseek 
(fp, 4096, SEEK_CUR); } } dram->fp = fp; return QEMU_FEATURE_RAM; } static void ram_free (struct qemu_device *d, struct qemu_device_list *dl) { struct qemu_device_ram *dram = (struct qemu_device_ram *)d; free (dram->offsets); } int ram_read_phys_page (struct qemu_device_ram *dram, void *buf, uint64_t addr) { off_t ofs; ssize_t bytes ATTRIBUTE_UNUSED; if (addr >= dram->last_ram_offset) return false; assert ((addr & 0xfff) == 0); // ofs = dram->offsets[addr / 4096]; if (load_mapfile_offset(addr, &ofs) < 0) return 0; if ((ofs & RAM_OFFSET_COMPRESSED) == RAM_OFFSET_COMPRESSED) memset (buf, ofs & 255, 4096); else bytes = pread (fileno (dram->fp), buf, 4096, ofs); return true; } static struct qemu_device * ram_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl ram = { "ram", ram_load, ram_free }; assert (version_id == 3 || version_id == 4); kvm->mapinfo.ram_version_id = version_id; return device_alloc (dl, sizeof (struct qemu_device_ram), &ram, section_id, instance_id, version_id); } #define BLK_MIG_FLAG_EOS 2 static uint32_t block_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { uint64_t header; header = get_be64 (fp); assert (header == BLK_MIG_FLAG_EOS); return 0; } static struct qemu_device * block_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl block = { "block", block_load, NULL }; return device_alloc (dl, sizeof (struct qemu_device), &block, section_id, instance_id, version_id); } /* RHEL5 marker. 
*/ static uint32_t rhel5_marker_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return 0; } static struct qemu_device * rhel5_marker_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl rhel5_marker = { "__rhel5", rhel5_marker_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &rhel5_marker, section_id, instance_id, version_id); } /* cpu_common loader. */ struct qemu_device_cpu_common { struct qemu_device base; uint32_t halted; uint32_t irq; }; static uint32_t cpu_common_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { struct qemu_device_cpu_common *cpu = (struct qemu_device_cpu_common *)d; cpu->halted = get_be32 (fp); cpu->irq = get_be32 (fp); return 0; } static struct qemu_device * cpu_common_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl cpu_common = { "cpu_common", cpu_common_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device_cpu_common), &cpu_common, section_id, instance_id, version_id); } /* CPU loader. */ static inline uint64_t get_be_long (FILE *fp, int size) { uint32_t a = size == 32 ? 
0 : get_be32 (fp); uint32_t b = get_be32 (fp); return ((uint64_t)a << 32) | b; } static inline void get_be_fp80 (FILE *fp, union qemu_fpu_reg *result) { result->mmx = get_be64 (fp); result->bytes[9] = getc (fp); result->bytes[8] = getc (fp); } static void cpu_load_seg (FILE *fp, struct qemu_x86_seg *seg, int size) { seg->selector = get_be32 (fp); seg->base = get_be_long (fp, size); seg->limit = get_be32 (fp); seg->flags = get_be32 (fp); } static bool v12_has_xsave_state(FILE *fp) { char name[257]; bool ret = true; long offset = ftell(fp); // save offset /* * peek into byte stream to check for APIC vmstate */ if (getc(fp) == QEMU_VM_SECTION_FULL) { get_be32(fp); // skip section id get_string(fp, name); if (strcmp(name, "apic") == 0) ret = false; } fseek(fp, offset, SEEK_SET); // restore offset return ret; } static uint32_t cpu_load (struct qemu_device *d, FILE *fp, int size) { struct qemu_device_x86 *dx86 = (struct qemu_device_x86 *)d; uint32_t qemu_hflags = 0, qemu_hflags2 = 0; int nregs; uint32_t version_id = dx86->dev_base.version_id; uint32_t rhel5_version_id; int i; off_t restart; struct qemu_device *drhel5; struct qemu_device_cpu_common *dcpu; if (kvm->flags & KVMHOST_32) size = 32; restart = ftello(fp); retry: nregs = size == 32 ? 
8 : 16; drhel5 = device_find_instance (d->list, "__rhel5", 0); if (drhel5 || (version_id >= 7 && version_id <= 9)) { rhel5_version_id = version_id; version_id = 7; } else { rhel5_version_id = 0; version_id = dx86->dev_base.version_id; } dprintf("cpu_load: rhel5_version_id: %d (effective) version_id: %d\n", rhel5_version_id, version_id); dcpu = (struct qemu_device_cpu_common *) device_find_instance (d->list, "cpu_common", d->instance_id); if (dcpu) { dx86->halted = dcpu->halted; dx86->irq = dcpu->irq; // device_free ((struct qemu_device *) dcpu); } for (i = 0; i < nregs; i++) dx86->regs[i] = get_be_long (fp, size); dx86->eip = get_be_long (fp, size); dx86->eflags = get_be_long (fp, size); qemu_hflags = get_be32 (fp); dx86->fpucw = get_be16 (fp); dx86->fpusw = get_be16 (fp); dx86->fpu_free = get_be16 (fp); if (get_be16 (fp)) for (i = 0; i < 8; i++) dx86->st[i].mmx = get_be64 (fp); else for (i = 0; i < 8; i++) get_be_fp80 (fp, &dx86->st[i]); cpu_load_seg (fp, &dx86->es, size); cpu_load_seg (fp, &dx86->cs, size); cpu_load_seg (fp, &dx86->ss, size); cpu_load_seg (fp, &dx86->ds, size); cpu_load_seg (fp, &dx86->fs, size); cpu_load_seg (fp, &dx86->gs, size); cpu_load_seg (fp, &dx86->ldt, size); cpu_load_seg (fp, &dx86->tr, size); cpu_load_seg (fp, &dx86->gdt, size); cpu_load_seg (fp, &dx86->idt, size); dx86->sysenter.cs = get_be32 (fp); dx86->sysenter.esp = get_be_long (fp, version_id <= 6 ? 32 : size); dx86->sysenter.eip = get_be_long (fp, version_id <= 6 ? 
32 : size); dx86->cr0 = get_be_long (fp, size); dx86->cr2 = get_be_long (fp, size); dx86->cr3 = get_be_long (fp, size); dx86->cr4 = get_be_long (fp, size); for (i = 0; i < 8; i++) dx86->dr[i] = get_be_long (fp, size); dx86->a20_masked = get_be32 (fp) != 0xffffffff; dx86->mxcsr = get_be32 (fp); for (i = 0; i < nregs; i++) get_qemu128 (fp, &dx86->xmm[i]); if (size == 64) { dx86->efer = get_be64 (fp); dx86->star = get_be64 (fp); dx86->lstar = get_be64 (fp); dx86->cstar = get_be64 (fp); dx86->fmask = get_be64 (fp); dx86->kernel_gs_base = get_be64 (fp); } dx86->smbase = get_be32 (fp); dx86->soft_mmu = qemu_hflags & (1 << 2); dx86->smm = qemu_hflags & (1 << 19); if (version_id == 4) goto store; dx86->pat = get_be64 (fp); qemu_hflags2 = get_be32 (fp); dx86->global_if = qemu_hflags2 & (1 << 0); dx86->in_nmi = qemu_hflags2 & (1 << 2); if (version_id < 6) dx86->halted = get_be32 (fp); dx86->svm.hsave = get_be64 (fp); dx86->svm.vmcb = get_be64 (fp); dx86->svm.tsc_offset = get_be64 (fp); dx86->svm.in_vmm = qemu_hflags & (1 << 21); dx86->svm.guest_if_mask = qemu_hflags2 & (1 << 1); dx86->svm.guest_intr_masking = qemu_hflags2 & (1 << 3); dx86->svm.intercept_mask = get_be64 (fp); dx86->svm.cr_read_mask = get_be16 (fp); dx86->svm.cr_write_mask = get_be16 (fp); dx86->svm.dr_read_mask = get_be16 (fp); dx86->svm.dr_write_mask = get_be16 (fp); dx86->svm.exception_intercept_mask = get_be32 (fp); dx86->cr8 = getc (fp); if (version_id >= 8) { for (i = 0; i < 11; i++) dx86->fixed_mtrr[i] = get_be64 (fp); dx86->deftype_mtrr = get_be64 (fp); for (i = 0; i < 8; i++) { dx86->variable_mtrr[i].base = get_be64 (fp); dx86->variable_mtrr[i].mask = get_be64 (fp); } } /* This was present only when KVM was enabled up to v8. * Furthermore, it changed format in v9. 
*/ if (version_id >= 9) { int32_t pending_irq = (int32_t) get_be32 (fp); if (pending_irq >= 0 && pending_irq <= 255) dx86->kvm.int_bitmap[pending_irq / 64] |= (uint64_t)1 << (pending_irq & 63); dx86->kvm.mp_state = get_be32 (fp); dx86->kvm.tsc = get_be64 (fp); } else if (d->list->features & QEMU_FEATURE_KVM) { for (i = 0; i < 4; i++) dx86->kvm.int_bitmap[i] = get_be64 (fp); dx86->kvm.tsc = get_be64 (fp); if (version_id >= 5) dx86->kvm.mp_state = get_be32 (fp); } if (version_id >= 11) { dx86->kvm.exception_injected = get_be32 (fp); } if (rhel5_version_id >= 8) { dx86->kvm.system_time_msr = get_be64 (fp); dx86->kvm.wall_clock_msr = get_be64 (fp); } if (version_id >= 11 || rhel5_version_id >= 9) { dx86->kvm.soft_interrupt = getc (fp); dx86->kvm.nmi_injected = getc (fp); dx86->kvm.nmi_pending = getc (fp); dx86->kvm.has_error_code = getc (fp); dx86->kvm.sipi_vector = get_be32 (fp); } if (version_id >= 10) { dx86->mce.mcg_cap = get_be64 (fp); dx86->mce.mcg_status = get_be64 (fp); dx86->mce.mcg_ctl = get_be64 (fp); for (i = 0; i < 10 * 4; i++) dx86->mce.mce_banks[i] = get_be64 (fp); } if (version_id >= 11) { dx86->tsc_aux = get_be64 (fp); dx86->kvm.system_time_msr = get_be64 (fp); dx86->kvm.wall_clock_msr = get_be64 (fp); } if (version_id >= 12 && v12_has_xsave_state(fp)) { dx86->xcr0 = get_be64 (fp); dx86->xstate_bv = get_be64 (fp); for (i = 0; i < nregs; i++) get_qemu128 (fp, &dx86->ymmh_regs[i]); } store: if (!kvmdump_regs_store(d->instance_id, dx86)) { size = 32; kvm->flags |= KVMHOST_32; fseeko(fp, restart, SEEK_SET); dprintf("cpu_load: invalid registers: retry with 32-bit host\n"); goto retry; } if (dcpu) device_free ((struct qemu_device *) dcpu); return QEMU_FEATURE_CPU; } static uint32_t cpu_load_32 (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return cpu_load (d, fp, 32); } static struct qemu_device * cpu_init_load_32 (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { struct 
qemu_device_x86 *dx86; static struct qemu_device_vtbl cpu = { "cpu", cpu_load_32, NULL }; assert (!live); // assert (version_id >= 4 && version_id <= 9); assert (version_id >= 4 && version_id <= 12); kvm->mapinfo.cpu_version_id = version_id; dx86 = (struct qemu_device_x86 *) device_alloc (dl, sizeof (struct qemu_device_x86), &cpu, section_id, instance_id, version_id); return (struct qemu_device *) dx86; } static uint32_t cpu_load_64 (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return cpu_load (d, fp, 64); } static struct qemu_device * cpu_init_load_64 (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { struct qemu_device_x86 *dx86; static struct qemu_device_vtbl cpu = { "cpu", cpu_load_64, NULL }; assert (!live); // assert (version_id >= 4 && version_id <= 9); assert (version_id >= 4 && version_id <= 12); kvm->mapinfo.cpu_version_id = version_id; dx86 = (struct qemu_device_x86 *) device_alloc (dl, sizeof (struct qemu_device_x86), &cpu, section_id, instance_id, version_id); return (struct qemu_device *) dx86; } /* IOAPIC loader. */ static uint32_t apic_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { switch (d->version_id) { case 1: fseek (fp, 173, SEEK_CUR); break; case 2: case 3: fseek (fp, 181, SEEK_CUR); break; } return 0; } static struct qemu_device * apic_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl apic = { "apic", apic_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &apic, section_id, instance_id, version_id); } /* timer loader. 
*/ static uint32_t timer_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 24, SEEK_CUR); return QEMU_FEATURE_TIMER; } static struct qemu_device * timer_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl timer = { "timer", timer_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &timer, section_id, instance_id, version_id); } /* kvmclock loader. */ static uint32_t kvmclock_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 8, SEEK_CUR); return QEMU_FEATURE_KVM; } static struct qemu_device * kvmclock_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl kvmclock = { "kvmclock", kvmclock_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &kvmclock, section_id, instance_id, version_id); } /* kvm-tpr-opt loader. */ static uint32_t kvm_tpr_opt_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 144, SEEK_CUR); return QEMU_FEATURE_KVM; } static struct qemu_device * kvm_tpr_opt_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl kvm_tpr_opt = { "kvm-tpr-opt", kvm_tpr_opt_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &kvm_tpr_opt, section_id, instance_id, version_id); } /* Putting it together. 
*/ const struct qemu_device_loader devices_x86_64[] = { { "__rhel5", rhel5_marker_init_load }, { "cpu_common", cpu_common_init_load }, { "kvm-tpr-opt", kvm_tpr_opt_init_load }, { "kvmclock", kvmclock_init_load }, { "cpu", cpu_init_load_64 }, { "apic", apic_init_load }, { "block", block_init_load }, { "ram", ram_init_load }, { "timer", timer_init_load }, { NULL, NULL } }; const struct qemu_device_loader devices_x86_32[] = { { "__rhel5", rhel5_marker_init_load }, { "cpu_common", cpu_common_init_load }, { "kvm-tpr-opt", kvm_tpr_opt_init_load }, { "kvmclock", kvmclock_init_load }, { "cpu", cpu_init_load_32 }, { "apic", apic_init_load }, { "block", block_init_load }, { "ram", ram_init_load }, { "timer", timer_init_load }, { NULL, NULL } }; #define QEMU_VM_FILE_MAGIC 0x5145564D #define LIBVIRT_QEMU_VM_FILE_MAGIC 0x4c696276 struct libvirt_header { char magic[16]; uint32_t version; uint32_t xml_length; uint32_t was_running; uint32_t padding[16]; }; static long device_search(const struct qemu_device_loader *, FILE *); static struct qemu_device * device_get (const struct qemu_device_loader *devices, struct qemu_device_list *dl, enum qemu_save_section sec, FILE *fp) { char name[257]; uint32_t section_id, instance_id, version_id; // bool live; const struct qemu_device_loader *devp; long next_device_offset; next_device: devp = devices; if (sec == QEMU_VM_SUBSECTION) { get_string(fp, name); goto search_device; } section_id = get_be32 (fp); if (sec != QEMU_VM_SECTION_START && sec != QEMU_VM_SECTION_FULL) return device_find (dl, section_id); get_string(fp, name); instance_id = get_be32 (fp); version_id = get_be32 (fp); while (devp->name && strcmp (devp->name, name)) devp++; if (!devp->name) { search_device: dprintf("device_get: unknown/unsupported: \"%s\"\n", name); if ((next_device_offset = device_search(devices, fp))) { fseek(fp, next_device_offset, SEEK_CUR); sec = getc(fp); if (sec == QEMU_VM_EOF) return NULL; goto next_device; } return NULL; } return devp->init_load (dl, 
section_id, instance_id, version_id, sec == QEMU_VM_SECTION_START, fp); } struct qemu_device_list * qemu_load (const struct qemu_device_loader *devices, uint32_t required_features, FILE *fp) { struct qemu_device_list *result = NULL; struct qemu_device *last = NULL;; size_t items ATTRIBUTE_UNUSED; switch (get_be32 (fp)) { case QEMU_VM_FILE_MAGIC: break; case LIBVIRT_QEMU_VM_FILE_MAGIC: { struct libvirt_header header; memcpy (header.magic, "Libv", 4); items = fread (&header.magic[4], sizeof (header) - 4, 1, fp); if (memcmp ("LibvirtQemudSave", header.magic, 16)) goto fail; fseek (fp, header.xml_length, SEEK_CUR); if (get_be32 (fp) != QEMU_VM_FILE_MAGIC) goto fail; break; } default: goto fail; } if (get_be32 (fp) != 3) return NULL; dprintf("\n"); result = calloc (1, sizeof (struct qemu_device_list)); for (;;) { struct qemu_device *d; uint32_t features; enum qemu_save_section sec = getc (fp); if (feof (fp) || ferror (fp)) break; if (sec == QEMU_VM_EOF) break; d = device_get (devices, result, sec, fp); if (!d) break; if (d != last) { dprintf("qemu_load: \"%s\"\n", d->vtbl->name); last = d; } features = d->vtbl->load (d, fp, sec); if (feof (fp) || ferror (fp)) break; if (sec == QEMU_VM_SECTION_END || sec == QEMU_VM_SECTION_FULL) result->features |= features; } if (ferror (fp) || (result->features & required_features) != required_features) goto fail; return result; fail: device_list_free (result); free (result); return NULL; } /* * crash utility adaptation. 
*/ #include "defs.h" int is_qemu_vm_file(char *filename) { struct libvirt_header header; FILE *vmp; int retval; size_t items ATTRIBUTE_UNUSED; char *xml; if ((vmp = fopen(filename, "r")) == NULL) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } retval = FALSE; xml = NULL; switch (get_be32(vmp)) { case QEMU_VM_FILE_MAGIC: retval = TRUE; break; case LIBVIRT_QEMU_VM_FILE_MAGIC: rewind(vmp); items = fread(&header.magic[0], sizeof(header), 1, vmp); if (STRNEQ(header.magic, "LibvirtQemudSave")) { if ((xml = (char *)malloc(header.xml_length))) { items = fread(xml, header.xml_length, 1, vmp); /* * Parse here if necessary or desirable. */ } else fseek(vmp, header.xml_length, SEEK_CUR); if (get_be32(vmp) == QEMU_VM_FILE_MAGIC) retval = TRUE; } break; default: retval = FALSE; } if (xml) free(xml); switch (retval) { case TRUE: kvm->vmp = vmp; kvm->vmfd = fileno(vmp); break; case FALSE: fclose(vmp); break; } return retval; } void dump_qemu_header(FILE *out) { int i; struct libvirt_header header; char magic[4]; uint8_t c; size_t items ATTRIBUTE_UNUSED; rewind(kvm->vmp); if (get_be32(kvm->vmp) == QEMU_VM_FILE_MAGIC) { fprintf(out, "%s: QEMU_VM_FILE_MAGIC\n", pc->dumpfile); return; } rewind(kvm->vmp); items = fread(&header, sizeof(header), 1, kvm->vmp); fprintf(out, "%s: libvirt_header:\n\n", pc->dumpfile); fprintf(out, " magic: "); for (i = 0; i < 16; i++) fprintf(out, "%c", header.magic[i]); fprintf(out, "\n"); fprintf(out, " version: %d\n", header.version); fprintf(out, " xml_length: %d\n", header.xml_length); fprintf(out, " was_running: %d\n", header.was_running); fprintf(out, " padding: (not shown)\n\n"); for (i = 0; i < header.xml_length; i++) { c = getc(kvm->vmp); if (c) fprintf(out, "%c", c); } fprintf(out, "\n"); items = fread(&magic, sizeof(char), 4, kvm->vmp); for (i = 0; i < 4; i++) fprintf(out, "%c", magic[i]); fprintf(out, "\n"); } static long device_search(const struct qemu_device_loader *devices, FILE *fp) { uint sz; char *p1, *p2; long 
next_device_offset; long remaining; char buf[4096]; off_t current; BZERO(buf, 4096); current = ftello(fp); if (fread(buf, sizeof(char), 4096, fp) != 4096) { fseeko(fp, current, SEEK_SET); return 0; } fseeko(fp, current, SEEK_SET); while (devices->name) { for (p1 = buf, remaining = 4096; (p2 = memchr(p1, devices->name[0], remaining)); p1 = p2+1, remaining = 4096 - (p1-buf)) { sz = *((unsigned char *)p2-1); if (STRNEQ(p2, devices->name) && (strlen(devices->name) == sz)) { *(p2+sz) = '\0'; dprintf("device_search: %s\n", p2); next_device_offset = (p2-buf) - 6; return next_device_offset; } } devices++; } return 0; } crash-7.1.4/memory.c0000775000000000000000000177517312634305150013026 0ustar rootroot/* memory.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include struct meminfo { /* general purpose memory information structure */ ulong cache; /* used by the various memory searching/dumping */ ulong slab; /* routines. Only one of these is used per cmd */ ulong c_flags; /* so stuff whatever's helpful in here... 
*/ ulong c_offset; ulong c_num; ulong s_mem; void *s_freep; ulong *s_index; ulong s_inuse; ulong cpucached_cache; ulong cpucached_slab; ulong inuse; ulong order; ulong slabsize; ulong num_slabs; ulong objects; ulonglong spec_addr; ulong flags; ulong size; ulong objsize; int memtype; int free; int slab_offset; char *reqname; char *curname; ulong *addrlist; int *kmem_bufctl; ulong *cpudata[NR_CPUS]; ulong *shared_array_cache; int current_cache_index; ulong found; ulong retval; struct struct_member_data *page_member_cache; ulong nr_members; char *ignore; int errors; int calls; int cpu; int cache_count; ulong get_shared; ulong get_totalram; ulong get_buffers; ulong get_slabs; char *slab_buf; char *cache_buf; ulong *cache_list; struct vmlist { ulong addr; ulong size; } *vmlist; ulong container; int *freelist; int freelist_index_size; }; /* * Search modes */ #define SEARCH_ULONG (0) #define SEARCH_UINT (1) #define SEARCH_USHORT (2) #define SEARCH_CHARS (3) #define SEARCH_DEFAULT (SEARCH_ULONG) /* search mode information */ struct searchinfo { int mode; int vcnt; int val; int context; int memtype; int do_task_header; int tasks_found; struct task_context *task_context; ulong vaddr_start; ulong vaddr_end; ulonglong paddr_start; ulonglong paddr_end; union { /* default ulong search */ struct { ulong value[MAXARGS]; char *opt_string[MAXARGS]; ulong mask; } s_ulong; /* uint search */ struct { uint value[MAXARGS]; char *opt_string[MAXARGS]; uint mask; } s_uint; /* ushort search */ struct { ushort value[MAXARGS]; char *opt_string[MAXARGS]; ushort mask; } s_ushort; /* string (chars) search */ struct { char *value[MAXARGS]; int len[MAXARGS]; int started_flag; /* string search needs history */ } s_chars; } s_parms; char buf[BUFSIZE]; }; static char *memtype_string(int, int); static char *error_handle_string(ulong); static void collect_page_member_data(char *, struct meminfo *); struct integer_data { ulong value; ulong bitfield_value; struct struct_member_data *pmd; }; static int 
get_bitfield_data(struct integer_data *); static int show_page_member_data(char *, ulong, struct meminfo *, char *); static void dump_mem_map(struct meminfo *); static void dump_mem_map_SPARSEMEM(struct meminfo *); static void fill_mem_map_cache(ulong, ulong, char *); static void page_flags_init(void); static int page_flags_init_from_pageflag_names(void); static int page_flags_init_from_pageflags_enum(void); static int translate_page_flags(char *, ulong); static void dump_free_pages(struct meminfo *); static int dump_zone_page_usage(void); static void dump_multidimensional_free_pages(struct meminfo *); static void dump_free_pages_zones_v1(struct meminfo *); static void dump_free_pages_zones_v2(struct meminfo *); struct free_page_callback_data; static int dump_zone_free_area(ulong, int, ulong, struct free_page_callback_data *); static void dump_page_hash_table(struct meminfo *); static void kmem_search(struct meminfo *); static void kmem_cache_init(void); static void kmem_cache_init_slub(void); static ulong max_cpudata_limit(ulong, ulong *); static int kmem_cache_downsize(void); static int ignore_cache(struct meminfo *, char *); static char *is_kmem_cache_addr(ulong, char *); static char *is_kmem_cache_addr_common(ulong, char *); static void kmem_cache_list(void); static void dump_kmem_cache(struct meminfo *); static void dump_kmem_cache_percpu_v1(struct meminfo *); static void dump_kmem_cache_percpu_v2(struct meminfo *); static void dump_kmem_cache_slub(struct meminfo *); static void dump_kmem_cache_info_v2(struct meminfo *); static void kmem_cache_list_common(void); static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *); static unsigned int oo_order(ulong); static unsigned int oo_objects(ulong); static char *vaddr_to_kmem_cache(ulong, char *, int); static char *is_slab_overload_page(ulong, ulong *, char *); static ulong vaddr_to_slab(ulong); static void do_slab_chain(int, struct meminfo *); static void do_slab_chain_percpu_v1(long, struct meminfo *); static 
void do_slab_chain_percpu_v2(long, struct meminfo *); static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *); static void do_slab_chain_slab_overload_page(long, struct meminfo *); static int slab_freelist_index_size(void); static int do_slab_slub(struct meminfo *, int); static void do_kmem_cache_slub(struct meminfo *); static void save_slab_data(struct meminfo *); static int slab_data_saved(struct meminfo *); static void dump_saved_slab_data(void); static void dump_slab(struct meminfo *); static void dump_slab_percpu_v1(struct meminfo *); static void dump_slab_percpu_v2(struct meminfo *); static void dump_slab_overload_page(struct meminfo *); static int verify_slab_v1(struct meminfo *, ulong, int); static int verify_slab_v2(struct meminfo *, ulong, int); static int verify_slab_overload_page(struct meminfo *, ulong, int); static void gather_slab_free_list(struct meminfo *); static void gather_slab_free_list_percpu(struct meminfo *); static void gather_slab_free_list_slab_overload_page(struct meminfo *); static void gather_cpudata_list_v1(struct meminfo *); static void gather_cpudata_list_v2(struct meminfo *); static void gather_cpudata_list_v2_nodes(struct meminfo *, int); static int check_cpudata_list(struct meminfo *, ulong); static int check_shared_list(struct meminfo *, ulong); static void gather_slab_cached_count(struct meminfo *); static void dump_slab_objects(struct meminfo *); static void dump_slab_objects_percpu(struct meminfo *); static void dump_vmlist(struct meminfo *); static void dump_vmap_area(struct meminfo *); static int dump_page_lists(struct meminfo *); static void dump_kmeminfo(void); static int page_to_phys(ulong, physaddr_t *); static void display_memory(ulonglong, long, ulong, int, void *); static char *show_opt_string(struct searchinfo *); static void display_with_pre_and_post(void *, ulonglong, struct searchinfo *); static ulong search_ulong(ulong *, ulong, int, struct searchinfo *); static ulong search_uint(ulong *, ulong, int, 
struct searchinfo *); static ulong search_ushort(ulong *, ulong, int, struct searchinfo *); static ulong search_chars(ulong *, ulong, int, struct searchinfo *); static ulonglong search_ulong_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_uint_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_ushort_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_chars_p(ulong *, ulonglong, int, struct searchinfo *); static void search_virtual(struct searchinfo *); static void search_physical(struct searchinfo *); static int next_upage(struct task_context *, ulong, ulong *); static int next_kpage(ulong, ulong *); static int next_physpage(ulonglong, ulonglong *); static int next_vmlist_vaddr(ulong, ulong *); static int next_module_vaddr(ulong, ulong *); static int next_identity_mapping(ulong, ulong *); static int vm_area_page_dump(ulong, ulong, ulong, ulong, ulong, struct reference *); static void rss_page_types_init(void); static int dump_swap_info(ulong, ulong *, ulong *); static int get_hugetlb_total_pages(ulong *); static void swap_info_init(void); static char *get_swapdev(ulong, char *); static void fill_swap_info(ulong); static char *vma_file_offset(ulong, ulong, char *); static ssize_t read_dev_kmem(ulong, char *, long); static void dump_memory_nodes(int); static void dump_zone_stats(void); #define MEMORY_NODES_DUMP (0) #define MEMORY_NODES_INITIALIZE (1) static void node_table_init(void); static int compare_node_data(const void *, const void *); static void do_vm_flags(ulonglong); static ulonglong get_vm_flags(char *); static void PG_reserved_flag_init(void); static void PG_slab_flag_init(void); static ulong nr_blockdev_pages(void); void sparse_mem_init(void); void dump_mem_sections(void); void list_mem_sections(void); ulong sparse_decode_mem_map(ulong, ulong); char *read_mem_section(ulong); ulong nr_to_section(ulong); int valid_section(ulong); int section_has_mem_map(ulong); ulong 
section_mem_map_addr(ulong); ulong valid_section_nr(ulong); ulong pfn_to_map(ulong); static int get_nodes_online(void); static int next_online_node(int); static ulong next_online_pgdat(int); static int vm_stat_init(void); static int vm_event_state_init(void); static int dump_vm_stat(char *, long *, ulong); static int dump_vm_event_state(void); static int dump_page_states(void); static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong); static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong); static int page_to_nid(ulong); static int get_kmem_cache_list(ulong **); static int get_kmem_cache_slub_data(long, struct meminfo *); static ulong compound_head(ulong); static long count_partial(ulong, struct meminfo *); static ulong get_freepointer(struct meminfo *, void *); static int count_free_objects(struct meminfo *, ulong); char *is_slab_page(struct meminfo *, char *); static void do_cpu_partial_slub(struct meminfo *, int); static void do_node_lists_slub(struct meminfo *, ulong, int); static int devmem_is_restricted(void); static int switch_to_proc_kcore(void); static int verify_pfn(ulong); static void dump_per_cpu_offsets(void); static void dump_page_flags(ulonglong); static ulong kmem_cache_nodelists(ulong); static void dump_hstates(void); /* * Memory display modes specific to this file. 
*/ #define DISPLAY_8 (0x2) #define DISPLAY_16 (0x4) #define DISPLAY_32 (0x8) #define DISPLAY_64 (0x10) #define SHOW_OFFSET (0x20) #define SYMBOLIC (0x40) #define HEXADECIMAL (0x80) #define DECIMAL (0x100) #define UDECIMAL (0x200) #define ASCII_ENDLINE (0x400) #define NO_ASCII (0x800) #define SLAB_CACHE (0x1000) #define DISPLAY_ASCII (0x2000) #define NET_ENDIAN (0x4000) #define DISPLAY_RAW (0x8000) #define NO_ERROR (0x10000) #define SLAB_CACHE2 (0x20000) #define DISPLAY_TYPES (DISPLAY_RAW|DISPLAY_ASCII|DISPLAY_8|\ DISPLAY_16|DISPLAY_32|DISPLAY_64) #define ASCII_UNLIMITED ((ulong)(-1) >> 1) static ulong DISPLAY_DEFAULT; /* * Verify that the sizeof the primitive types are reasonable. */ void mem_init(void) { if (sizeof(char) != SIZEOF_8BIT) error(FATAL, "unsupported sizeof(char): %d\n", sizeof(char)); if (sizeof(short) != SIZEOF_16BIT) error(FATAL, "unsupported sizeof(short): %d\n", sizeof(short)); if ((sizeof(int) != SIZEOF_32BIT) && (sizeof(int) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(int): %d\n", sizeof(int)); if ((sizeof(long) != SIZEOF_32BIT) && (sizeof(long) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(long): %d\n", sizeof(long)); if (sizeof(void *) != sizeof(long)) error(FATAL, "pointer size: %d is not sizeof(long): %d\n", sizeof(void *), sizeof(long)); DISPLAY_DEFAULT = (sizeof(long) == 8) ? DISPLAY_64 : DISPLAY_32; } /* * Stash a few popular offsets and some basic kernel virtual memory * items used by routines in this file. 
*/
void
vm_init(void)
{
	char buf[BUFSIZE];
	int i, len, dimension, nr_node_ids;
	struct syment *sp_array[2];
	ulong value1, value2;
	char *kmem_cache_node_struct, *nodelists_field;

	MEMBER_OFFSET_INIT(task_struct_mm, "task_struct", "mm");
	MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap");
	MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");
	/*
	 * mm_struct RSS accounting: fall back from "rss" to "_rss", and
	 * when the split anon/file counters are absent, to the rss_stat
	 * counter array.
	 */
	MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss");
	if (!VALID_MEMBER(mm_struct_rss))
		MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss");
	MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss");
	MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss");
	if (!VALID_MEMBER(mm_struct_anon_rss)) {
		MEMBER_OFFSET_INIT(mm_struct_rss_stat, "mm_struct", "rss_stat");
		MEMBER_OFFSET_INIT(mm_rss_stat_count, "mm_rss_stat", "count");
	}
	MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm");
	MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code");
	MEMBER_OFFSET_INIT(mm_struct_mm_count, "mm_struct", "mm_count");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_next, "vm_area_struct", "vm_next");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_end, "vm_area_struct", "vm_end");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_start, "vm_area_struct", "vm_start");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_file, "vm_area_struct", "vm_file");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_offset, "vm_area_struct", "vm_offset");
	MEMBER_OFFSET_INIT(vm_area_struct_vm_pgoff, "vm_area_struct", "vm_pgoff");
	MEMBER_SIZE_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags");
	MEMBER_OFFSET_INIT(vm_struct_addr, "vm_struct", "addr");
	MEMBER_OFFSET_INIT(vm_struct_size, "vm_struct", "size");
	MEMBER_OFFSET_INIT(vm_struct_next, "vm_struct", "next");
	/*
	 * Prefer the vmap_area list over the vm_struct chain when all of
	 * its required members plus the vmap_area_list symbol exist.
	 */
	MEMBER_OFFSET_INIT(vmap_area_va_start, "vmap_area", "va_start");
	MEMBER_OFFSET_INIT(vmap_area_va_end, "vmap_area", "va_end");
	MEMBER_OFFSET_INIT(vmap_area_list, "vmap_area", "list");
	MEMBER_OFFSET_INIT(vmap_area_flags, "vmap_area", "flags");
	MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "vm");
	if (INVALID_MEMBER(vmap_area_vm))
		MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "private");
	STRUCT_SIZE_INIT(vmap_area, "vmap_area");
	if (VALID_MEMBER(vmap_area_va_start) &&
	    VALID_MEMBER(vmap_area_va_end) &&
	    VALID_MEMBER(vmap_area_flags) &&
	    VALID_MEMBER(vmap_area_list) &&
	    VALID_MEMBER(vmap_area_vm) &&
	    kernel_symbol_exists("vmap_area_list"))
		vt->flags |= USE_VMAP_AREA;

	/* hugetlbfs hstate array, when this kernel has one. */
	if (kernel_symbol_exists("hstates")) {
		STRUCT_SIZE_INIT(hstate, "hstate");
		MEMBER_OFFSET_INIT(hstate_order, "hstate", "order");
		MEMBER_OFFSET_INIT(hstate_nr_huge_pages, "hstate", "nr_huge_pages");
		MEMBER_OFFSET_INIT(hstate_free_huge_pages, "hstate", "free_huge_pages");
		MEMBER_OFFSET_INIT(hstate_name, "hstate", "name");
	}

	/*
	 * struct page members; many migrated into anonymous unions over
	 * time, hence the ANON_MEMBER_OFFSET_INIT fallbacks.
	 */
	MEMBER_OFFSET_INIT(page_next, "page", "next");
	if (VALID_MEMBER(page_next))
		MEMBER_OFFSET_INIT(page_prev, "page", "prev");
	if (INVALID_MEMBER(page_next))
		ANON_MEMBER_OFFSET_INIT(page_next, "page", "next");
	MEMBER_OFFSET_INIT(page_list, "page", "list");
	if (VALID_MEMBER(page_list)) {
		ASSIGN_OFFSET(page_list_next) = OFFSET(page_list) +
			OFFSET(list_head_next);
		ASSIGN_OFFSET(page_list_prev) = OFFSET(page_list) +
			OFFSET(list_head_prev);
	}
	MEMBER_OFFSET_INIT(page_next_hash, "page", "next_hash");
	MEMBER_OFFSET_INIT(page_inode, "page", "inode");
	MEMBER_OFFSET_INIT(page_offset, "page", "offset");
	MEMBER_OFFSET_INIT(page_count, "page", "count");
	if (INVALID_MEMBER(page_count)) {
		MEMBER_OFFSET_INIT(page_count, "page", "_count");
		if (INVALID_MEMBER(page_count))
			ANON_MEMBER_OFFSET_INIT(page_count, "page", "_count");
	}
	MEMBER_OFFSET_INIT(page_flags, "page", "flags");
	MEMBER_SIZE_INIT(page_flags, "page", "flags");
	MEMBER_OFFSET_INIT(page_mapping, "page", "mapping");
	if (INVALID_MEMBER(page_mapping))
		ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping");
	/*
	 * Pre-2.6.17 fallback: derive the mapping offset from the
	 * _mapcount member's position when "mapping" itself is hidden.
	 */
	if (INVALID_MEMBER(page_mapping) &&
	    (THIS_KERNEL_VERSION < LINUX(2,6,17)) &&
	    MEMBER_EXISTS("page", "_mapcount"))
		ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") +
			STRUCT_SIZE("atomic_t") + sizeof(ulong);
	MEMBER_OFFSET_INIT(page_index, "page", "index");
	if (INVALID_MEMBER(page_index))
		ANON_MEMBER_OFFSET_INIT(page_index, "page", "index");
	MEMBER_OFFSET_INIT(page_buffers, "page", "buffers");
	MEMBER_OFFSET_INIT(page_lru, "page", "lru");
	if (INVALID_MEMBER(page_lru))
		ANON_MEMBER_OFFSET_INIT(page_lru, "page", "lru");
	MEMBER_OFFSET_INIT(page_pte, "page", "pte");
	MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head");
	if (INVALID_MEMBER(page_compound_head))
		ANON_MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head");

	MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");

	/* swap_info_struct / block device / address_space plumbing. */
	MEMBER_OFFSET_INIT(swap_info_struct_swap_file, "swap_info_struct", "swap_file");
	MEMBER_OFFSET_INIT(swap_info_struct_swap_vfsmnt, "swap_info_struct", "swap_vfsmnt");
	MEMBER_OFFSET_INIT(swap_info_struct_flags, "swap_info_struct", "flags");
	MEMBER_OFFSET_INIT(swap_info_struct_swap_map, "swap_info_struct", "swap_map");
	MEMBER_OFFSET_INIT(swap_info_struct_swap_device, "swap_info_struct", "swap_device");
	MEMBER_OFFSET_INIT(swap_info_struct_prio, "swap_info_struct", "prio");
	MEMBER_OFFSET_INIT(swap_info_struct_max, "swap_info_struct", "max");
	MEMBER_OFFSET_INIT(swap_info_struct_pages, "swap_info_struct", "pages");
	MEMBER_OFFSET_INIT(swap_info_struct_inuse_pages, "swap_info_struct", "inuse_pages");
	MEMBER_OFFSET_INIT(swap_info_struct_old_block_size, "swap_info_struct", "old_block_size");
	MEMBER_OFFSET_INIT(block_device_bd_inode, "block_device", "bd_inode");
	MEMBER_OFFSET_INIT(block_device_bd_list, "block_device", "bd_list");
	MEMBER_OFFSET_INIT(block_device_bd_disk, "block_device", "bd_disk");
	MEMBER_OFFSET_INIT(inode_i_mapping, "inode", "i_mapping");
	MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", "page_tree");
	MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "nrpages");
	if (INVALID_MEMBER(address_space_nrpages))
		MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "__nrpages");

	MEMBER_OFFSET_INIT(gendisk_major, "gendisk", "major");
	MEMBER_OFFSET_INIT(gendisk_fops, "gendisk", "fops");
	MEMBER_OFFSET_INIT(gendisk_disk_name, "gendisk", "disk_name");

	STRUCT_SIZE_INIT(block_device, "block_device");
	STRUCT_SIZE_INIT(address_space, "address_space");
	STRUCT_SIZE_INIT(gendisk, "gendisk");

	STRUCT_SIZE_INIT(blk_major_name, "blk_major_name");
	if (VALID_STRUCT(blk_major_name)) {
		MEMBER_OFFSET_INIT(blk_major_name_next, "blk_major_name", "next");
		MEMBER_OFFSET_INIT(blk_major_name_name, "blk_major_name", "name");
		MEMBER_OFFSET_INIT(blk_major_name_major, "blk_major_name", "major");
	}

	/*
	 * Which slab allocator flavor this kernel uses is deduced from
	 * which of these structures/members exist; the branches below
	 * set PERCPU_KMALLOC_V1, PERCPU_KMALLOC_V2, KMALLOC_SLUB, or
	 * fall through to the oldest kmem_cache_s/kmem_slab_s layout.
	 */
	STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s");
	STRUCT_SIZE_INIT(slab_s, "slab_s");
	STRUCT_SIZE_INIT(slab, "slab");
	STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s");
	STRUCT_SIZE_INIT(pgd_t, "pgd_t");

	/*
	 * slab: overload struct slab over struct page
	 * https://lkml.org/lkml/2013/10/16/155
	 */
	if (MEMBER_EXISTS("kmem_cache", "freelist_cache")) {
		vt->flags |= SLAB_OVERLOAD_PAGE;
		ANON_MEMBER_OFFSET_INIT(page_s_mem, "page", "s_mem");
		ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist");
		ANON_MEMBER_OFFSET_INIT(page_active, "page", "active");
	}

	if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) {
		vt->flags |= PERCPU_KMALLOC_V1;
		MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num");
		MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next");
		MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name");
		MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize");
		MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags");
		MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder");
		MEMBER_OFFSET_INIT(kmem_cache_s_slabs, "kmem_cache_s", "slabs");
		MEMBER_OFFSET_INIT(kmem_cache_s_slabs_full, "kmem_cache_s", "slabs_full");
		MEMBER_OFFSET_INIT(kmem_cache_s_slabs_partial, "kmem_cache_s", "slabs_partial");
		MEMBER_OFFSET_INIT(kmem_cache_s_slabs_free, "kmem_cache_s", "slabs_free");
		MEMBER_OFFSET_INIT(kmem_cache_s_cpudata, "kmem_cache_s", "cpudata");
		ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.cpudata", NULL, 0);
		MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off");
		MEMBER_OFFSET_INIT(slab_s_list, "slab_s", "list");
		MEMBER_OFFSET_INIT(slab_s_s_mem, "slab_s", "s_mem");
		MEMBER_OFFSET_INIT(slab_s_inuse, "slab_s", "inuse");
		MEMBER_OFFSET_INIT(slab_s_free, "slab_s", "free");
		MEMBER_OFFSET_INIT(cpucache_s_avail, "cpucache_s", "avail");
		MEMBER_OFFSET_INIT(cpucache_s_limit, "cpucache_s", "limit");
		STRUCT_SIZE_INIT(cpucache_s, "cpucache_s");
	} else if (!VALID_STRUCT(kmem_slab_s) &&
		   !VALID_STRUCT(slab_s) &&
		   (VALID_STRUCT(slab) || (vt->flags & SLAB_OVERLOAD_PAGE))) {
		vt->flags |= PERCPU_KMALLOC_V2;

		if (VALID_STRUCT(kmem_cache_s)) {
			MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num");
			MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next");
			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name");
			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off");
			MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize");
			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags");
			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder");
			MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists");
			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array");
			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0);
		} else {
			/* kmem_cache_s renamed to kmem_cache in later kernels. */
			STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache");
			MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num");
			MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next");
			if (INVALID_MEMBER(kmem_cache_s_next)) {
				/*
				 * slab/slub unification starting in Linux 3.6.
				 */
				MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "list");
				MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list");
				MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name");
				MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size");
				STRUCT_SIZE_INIT(kmem_cache, "kmem_cache");
			}
			MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name");
			MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", "colour_off");
			if (MEMBER_EXISTS("kmem_cache", "objsize"))
				MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "objsize");
			else if (MEMBER_EXISTS("kmem_cache", "buffer_size"))
				MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "buffer_size");
			else if (MEMBER_EXISTS("kmem_cache", "size"))
				MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "size");
			MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags");
			MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache", "gfporder");
			MEMBER_OFFSET_INIT(kmem_cache_cpu_cache, "kmem_cache", "cpu_cache");

			if (MEMBER_EXISTS("kmem_cache", "lists"))
				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists");
			else if (MEMBER_EXISTS("kmem_cache", "nodelists") ||
				 MEMBER_EXISTS("kmem_cache", "node")) {
				nodelists_field = MEMBER_EXISTS("kmem_cache", "node") ?
					"node" : "nodelists";
				vt->flags |= PERCPU_KMALLOC_V2_NODES;
				MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", nodelists_field);
				if (MEMBER_TYPE("kmem_cache", nodelists_field) == TYPE_CODE_PTR) {
					/*
					 * nodelists now a pointer to an outside array
					 */
					vt->flags |= NODELISTS_IS_PTR;
					if (kernel_symbol_exists("nr_node_ids")) {
						get_symbol_data("nr_node_ids", sizeof(int),
							&nr_node_ids);
						vt->kmem_cache_len_nodes = nr_node_ids;
					} else
						vt->kmem_cache_len_nodes = 1;
				} else if (VALID_MEMBER(kmem_cache_cpu_cache)) {
					/*
					 * commit bf0dea23a9c094ae869a88bb694fbe966671bf6d
					 * mm/slab: use percpu allocator for cpu cache
					 */
					vt->flags |= SLAB_CPU_CACHE;
					MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node");
					if (kernel_symbol_exists("nr_node_ids")) {
						get_symbol_data("nr_node_ids", sizeof(int),
							&nr_node_ids);
						vt->kmem_cache_len_nodes = nr_node_ids;
					} else
						vt->kmem_cache_len_nodes = 1;
				} else {
					/*
					 * This should never happen with kmem_cache.node,
					 * only with kmem_cache.nodelists
					 */
					ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL,
						"kmem_cache.nodelists", NULL, 0);
				}
			}
			MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array");
			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0);
		}

		if (VALID_STRUCT(slab)) {
			MEMBER_OFFSET_INIT(slab_list, "slab", "list");
			MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem");
			MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse");
			MEMBER_OFFSET_INIT(slab_free, "slab", "free");
			/*
			 * slab members were moved to an anonymous union in 2.6.39.
			 */
			if (INVALID_MEMBER(slab_list))
				ANON_MEMBER_OFFSET_INIT(slab_list, "slab", "list");
			if (INVALID_MEMBER(slab_s_mem))
				ANON_MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem");
			if (INVALID_MEMBER(slab_inuse))
				ANON_MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse");
			if (INVALID_MEMBER(slab_free))
				ANON_MEMBER_OFFSET_INIT(slab_free, "slab", "free");
		}

		MEMBER_OFFSET_INIT(array_cache_avail, "array_cache", "avail");
		MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit");
		STRUCT_SIZE_INIT(array_cache, "array_cache");

		/*
		 * kmem_list3 renamed to kmem_cache_node in kernel 3.11-rc1
		 */
		kmem_cache_node_struct = STRUCT_EXISTS("kmem_cache_node") ?
			"kmem_cache_node" : "kmem_list3";
		MEMBER_OFFSET_INIT(kmem_list3_slabs_partial,
			kmem_cache_node_struct, "slabs_partial");
		MEMBER_OFFSET_INIT(kmem_list3_slabs_full,
			kmem_cache_node_struct, "slabs_full");
		MEMBER_OFFSET_INIT(kmem_list3_slabs_free,
			kmem_cache_node_struct, "slabs_free");
		MEMBER_OFFSET_INIT(kmem_list3_free_objects,
			kmem_cache_node_struct, "free_objects");
		MEMBER_OFFSET_INIT(kmem_list3_shared, kmem_cache_node_struct, "shared");
		/*
		 * Common to slab/slub
		 */
		ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache");
		ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page");
		ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page");
	} else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") &&
		   STRUCT_EXISTS("kmem_cache_node")) {
		vt->flags |= KMALLOC_SLUB;

		STRUCT_SIZE_INIT(kmem_cache, "kmem_cache");
		MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size");
		/* objsize renamed to object_size in later SLUB kernels. */
		MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize");
		if (INVALID_MEMBER(kmem_cache_objsize))
			MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "object_size");
		MEMBER_OFFSET_INIT(kmem_cache_offset, "kmem_cache", "offset");
		MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order");
		MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node");
		MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects");
		MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse");
		MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align");
		MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node");
		MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab");
		MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list");
		MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name");
		MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags");
		MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist");
		MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page");
		MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node");
		MEMBER_OFFSET_INIT(kmem_cache_cpu_partial, "kmem_cache_cpu", "partial");
		ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse");
		ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset");
		ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab");
		if (INVALID_MEMBER(page_slab))
			ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache");
		ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page");
		ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page");
		ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist");
		/* "objects" replaced by packed order/objects ("oo") field. */
		if (INVALID_MEMBER(kmem_cache_objects)) {
			MEMBER_OFFSET_INIT(kmem_cache_oo, "kmem_cache", "oo");
			ANON_MEMBER_OFFSET_INIT(page_objects, "page", "objects");
		}
		if (VALID_MEMBER(kmem_cache_node)) {
			ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0);
			vt->flags |= CONFIG_NUMA;
		}
		ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0);

		STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node");
		STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu");
		MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, "kmem_cache_node", "nr_partial");
		MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, "kmem_cache_node", "nr_slabs");
		MEMBER_OFFSET_INIT(kmem_cache_node_partial, "kmem_cache_node", "partial");
		MEMBER_OFFSET_INIT(kmem_cache_node_full, "kmem_cache_node", "full");
	} else {
		/* Oldest slab layout: kmem_cache_s/kmem_slab_s with c_/s_ names. */
		MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, "kmem_cache_s", "c_nextp");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_name, "kmem_cache_s", "c_name");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_num, "kmem_cache_s", "c_num");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_org_size, "kmem_cache_s", "c_org_size");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_flags, "kmem_cache_s", "c_flags");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_offset, "kmem_cache_s", "c_offset");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_firstp, "kmem_cache_s", "c_firstp");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_gfporder, "kmem_cache_s", "c_gfporder");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_magic, "kmem_cache_s", "c_magic");
		MEMBER_OFFSET_INIT(kmem_cache_s_c_align, "kmem_cache_s", "c_align");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_nextp, "kmem_slab_s", "s_nextp");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_freep, "kmem_slab_s", "s_freep");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_inuse, "kmem_slab_s", "s_inuse");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_mem, "kmem_slab_s", "s_mem");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_index, "kmem_slab_s", "s_index");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_offset, "kmem_slab_s", "s_offset");
		MEMBER_OFFSET_INIT(kmem_slab_s_s_magic, "kmem_slab_s", "s_magic");
	}

	/*
	 * Derive the kernel's configured NR_CPUS from whichever hint is
	 * available, in decreasing order of reliability.
	 */
	if (!kt->kernel_NR_CPUS) {
		if (enumerator_value("WORK_CPU_UNBOUND", (long *)&value1))
			kt->kernel_NR_CPUS = (int)value1;
		else if ((i = get_array_length("__per_cpu_offset", NULL, 0)))
			kt->kernel_NR_CPUS = i;
		else if (ARRAY_LENGTH(kmem_cache_s_cpudata))
			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata);
		else if (ARRAY_LENGTH(kmem_cache_s_array))
			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array);
		else if (ARRAY_LENGTH(kmem_cache_cpu_slab))
			kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab);
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "kernel NR_CPUS: %d %s\n", kt->kernel_NR_CPUS,
			kt->kernel_NR_CPUS ? "" : "(unknown)");

	if (kt->kernel_NR_CPUS > NR_CPUS) {
		error(WARNING,
		    "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n",
			kt->kernel_NR_CPUS, NR_CPUS);
		error(FATAL, "recompile crash with larger NR_CPUS\n");
	}

	/*
	 * Record the kernel page directory for each cpu: either via a
	 * machine-specific hook, the common swapper_pg_dir, or the
	 * per-cpu cpu_pgd array.
	 */
	if (machdep->init_kernel_pgd)
		machdep->init_kernel_pgd();
	else if (symbol_exists("swapper_pg_dir")) {
		value1 = symbol_value("swapper_pg_dir");
		for (i = 0; i < NR_CPUS; i++)
			vt->kernel_pgd[i] = value1;
	} else if (symbol_exists("cpu_pgd")) {
		len = get_array_length("cpu_pgd", &dimension, 0);
		if ((len == NR_CPUS) && (dimension == machdep->ptrs_per_pgd)) {
			value1 = symbol_value("cpu_pgd");
			for (i = 0; i < NR_CPUS; i++) {
				value2 = i *
					(SIZE(pgd_t) * machdep->ptrs_per_pgd);
				vt->kernel_pgd[i] = value1 + value2;
			}
			error(WARNING,
			    "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n",
				dimension, len);
		} else {
			error(WARNING,
			    "unrecognized dimensions: cpu_pgd[%d][%d]\n",
				dimension, len);
			value1 = symbol_value("cpu_pgd");
			for (i = 0; i < NR_CPUS; i++)
				vt->kernel_pgd[i] = value1;
			error(WARNING,
			    "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n",
				dimension, len);
		}
	} else
		error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n");

	get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory);

	/* Memory model: SPARSEMEM, FLATMEM, or DISCONTIGMEM. */
	if (kernel_symbol_exists("mem_section"))
		vt->flags |= SPARSEMEM;
	else if (kernel_symbol_exists("mem_map")) {
		get_symbol_data("mem_map", sizeof(char *), &vt->mem_map);
		vt->flags |= FLATMEM;
	} else
		vt->flags |= DISCONTIGMEM;

	sparse_mem_init();

	vt->vmalloc_start = machdep->vmalloc_start();
	if (IS_VMALLOC_ADDR(vt->mem_map))
		vt->flags |= V_MEM_MAP;
	vt->total_pages = BTOP(VTOP(vt->high_memory));

	/*
	 * totalram_pages (and totalhigh_pages below) may appear twice in
	 * the symbol table; when it does, read both and keep the larger.
	 */
	switch (get_syment_array("totalram_pages", sp_array, 2))
	{
	case 1:
		get_symbol_data("totalram_pages", sizeof(ulong),
			&vt->totalram_pages);
		break;
	case 2:
		if (!(readmem(sp_array[0]->value, KVADDR, &value1,
		    sizeof(ulong), "totalram_pages #1", RETURN_ON_ERROR)))
			break;
		if (!(readmem(sp_array[1]->value, KVADDR, &value2,
		    sizeof(ulong), "totalram_pages #2", RETURN_ON_ERROR)))
			break;
		vt->totalram_pages = MAX(value1, value2);
		break;
	}

	if (symbol_exists("totalhigh_pages")) {
		switch (get_syment_array("totalhigh_pages", sp_array, 2))
		{
		case 1:
			get_symbol_data("totalhigh_pages", sizeof(ulong),
				&vt->totalhigh_pages);
			break;
		case 2:
			if (!(readmem(sp_array[0]->value, KVADDR, &value1,
			    sizeof(ulong), "totalhigh_pages #1",
			    RETURN_ON_ERROR)))
				break;
			if (!(readmem(sp_array[1]->value, KVADDR, &value2,
			    sizeof(ulong), "totalhigh_pages #2",
			    RETURN_ON_ERROR)))
				break;
			vt->totalhigh_pages = MAX(value1, value2);
			break;
		}
		vt->total_pages += vt->totalhigh_pages;
	}

	if (symbol_exists("num_physpages"))
		get_symbol_data("num_physpages", sizeof(ulong),
			&vt->num_physpages);

	if (kernel_symbol_exists("mem_map"))
		get_symbol_data("max_mapnr", sizeof(ulong), &vt->max_mapnr);

	if (kernel_symbol_exists("nr_swapfiles"))
		get_symbol_data("nr_swapfiles", sizeof(unsigned int),
			&vt->nr_swapfiles);

	STRUCT_SIZE_INIT(page, "page");
	STRUCT_SIZE_INIT(free_area, "free_area");
	STRUCT_SIZE_INIT(free_area_struct, "free_area_struct");
	STRUCT_SIZE_INIT(zone, "zone");
	STRUCT_SIZE_INIT(zone_struct, "zone_struct");
	STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t");
	STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct");
	STRUCT_SIZE_INIT(mm_struct, "mm_struct");
	STRUCT_SIZE_INIT(vm_area_struct, "vm_area_struct");
	STRUCT_SIZE_INIT(pglist_data, "pglist_data");

	if (VALID_STRUCT(pglist_data)) {
		vt->flags |= ZONES;

		if (symbol_exists("pgdat_list") && !IS_SPARSEMEM())
			vt->flags |= NODES;

		/*
		 * Determine the number of nodes the best way possible,
		 * starting with a default of 1.
		 */
		vt->numnodes = 1;

		if (symbol_exists("numnodes"))
			get_symbol_data("numnodes", sizeof(int), &vt->numnodes);

		if (get_nodes_online())
			vt->flags |= NODES_ONLINE;

		MEMBER_OFFSET_INIT(pglist_data_node_zones,
			"pglist_data", "node_zones");
		MEMBER_OFFSET_INIT(pglist_data_node_mem_map,
			"pglist_data", "node_mem_map");
		MEMBER_OFFSET_INIT(pglist_data_node_start_paddr,
			"pglist_data", "node_start_paddr");
		MEMBER_OFFSET_INIT(pglist_data_node_start_mapnr,
			"pglist_data", "node_start_mapnr");
		MEMBER_OFFSET_INIT(pglist_data_node_size,
			"pglist_data", "node_size");
		MEMBER_OFFSET_INIT(pglist_data_node_id,
			"pglist_data", "node_id");
		MEMBER_OFFSET_INIT(pglist_data_node_next,
			"pglist_data", "node_next");
		MEMBER_OFFSET_INIT(pglist_data_bdata, "pglist_data", "bdata");
		MEMBER_OFFSET_INIT(pglist_data_nr_zones,
			"pglist_data", "nr_zones");
		MEMBER_OFFSET_INIT(pglist_data_node_start_pfn,
			"pglist_data", "node_start_pfn");
		MEMBER_OFFSET_INIT(pglist_data_pgdat_next,
			"pglist_data", "pgdat_next");
		MEMBER_OFFSET_INIT(pglist_data_node_present_pages,
			"pglist_data", "node_present_pages");
		MEMBER_OFFSET_INIT(pglist_data_node_spanned_pages,
			"pglist_data", "node_spanned_pages");
		ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones,
			"pglist_data.node_zones", NULL,
			SIZE_OPTION(zone_struct, zone));
		vt->ZONE_HIGHMEM = vt->nr_zones - 1;

		if (VALID_STRUCT(zone_struct)) {
			/* 2.4-era zone_struct layout. */
			MEMBER_OFFSET_INIT(zone_struct_free_pages,
				"zone_struct", "free_pages");
			MEMBER_OFFSET_INIT(zone_struct_free_area,
				"zone_struct", "free_area");
			MEMBER_OFFSET_INIT(zone_struct_zone_pgdat,
				"zone_struct", "zone_pgdat");
			MEMBER_OFFSET_INIT(zone_struct_name, "zone_struct", "name");
			MEMBER_OFFSET_INIT(zone_struct_size, "zone_struct", "size");
			if (INVALID_MEMBER(zone_struct_size))
				MEMBER_OFFSET_INIT(zone_struct_memsize,
					"zone_struct", "memsize");
			MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn,
				"zone_struct", "zone_start_pfn");
			MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr,
				"zone_struct", "zone_start_paddr");
			MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr,
				"zone_struct", "zone_start_mapnr");
			MEMBER_OFFSET_INIT(zone_struct_zone_mem_map,
				"zone_struct", "zone_mem_map");
			MEMBER_OFFSET_INIT(zone_struct_inactive_clean_pages,
				"zone_struct", "inactive_clean_pages");
			MEMBER_OFFSET_INIT(zone_struct_inactive_clean_list,
				"zone_struct", "inactive_clean_list");
			ARRAY_LENGTH_INIT(vt->nr_free_areas,
				zone_struct_free_area, "zone_struct.free_area",
				NULL, SIZE(free_area_struct));
			MEMBER_OFFSET_INIT(zone_struct_inactive_dirty_pages,
				"zone_struct", "inactive_dirty_pages");
			MEMBER_OFFSET_INIT(zone_struct_active_pages,
				"zone_struct", "active_pages");
			MEMBER_OFFSET_INIT(zone_struct_pages_min,
				"zone_struct", "pages_min");
			MEMBER_OFFSET_INIT(zone_struct_pages_low,
				"zone_struct", "pages_low");
			MEMBER_OFFSET_INIT(zone_struct_pages_high,
				"zone_struct", "pages_high");
			vt->dump_free_pages = dump_free_pages_zones_v1;
		} else if (VALID_STRUCT(zone)) {
			/* 2.6+ struct zone layout. */
			MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat");
			MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages");
			/*
			 * free_pages folded into the vm_stat array: compute
			 * its offset from the NR_FREE_PAGES enumerator.
			 */
			if (INVALID_MEMBER(zone_free_pages) &&
			    VALID_MEMBER(zone_vm_stat)) {
				long nr_free_pages = 0;
				if (!enumerator_value("NR_FREE_PAGES",
				    &nr_free_pages))
					error(WARNING,
					    "cannot determine NR_FREE_PAGES enumerator\n");
				ASSIGN_OFFSET(zone_free_pages) =
					OFFSET(zone_vm_stat) +
					(nr_free_pages * sizeof(long));
			}
			MEMBER_OFFSET_INIT(zone_free_area, "zone", "free_area");
			MEMBER_OFFSET_INIT(zone_zone_pgdat, "zone", "zone_pgdat");
			MEMBER_OFFSET_INIT(zone_name, "zone", "name");
			MEMBER_OFFSET_INIT(zone_zone_mem_map,
				"zone", "zone_mem_map");
			MEMBER_OFFSET_INIT(zone_zone_start_pfn,
				"zone", "zone_start_pfn");
			MEMBER_OFFSET_INIT(zone_spanned_pages,
				"zone", "spanned_pages");
			MEMBER_OFFSET_INIT(zone_present_pages,
				"zone", "present_pages");
			MEMBER_OFFSET_INIT(zone_pages_min, "zone", "pages_min");
			MEMBER_OFFSET_INIT(zone_pages_low, "zone", "pages_low");
			MEMBER_OFFSET_INIT(zone_pages_high, "zone", "pages_high");
			MEMBER_OFFSET_INIT(zone_watermark, "zone", "watermark");
			MEMBER_OFFSET_INIT(zone_nr_active, "zone", "nr_active");
			MEMBER_OFFSET_INIT(zone_nr_inactive,
				"zone", "nr_inactive");
			MEMBER_OFFSET_INIT(zone_all_unreclaimable,
				"zone", "all_unreclaimable");
			MEMBER_OFFSET_INIT(zone_flags, "zone", "flags");
			MEMBER_OFFSET_INIT(zone_pages_scanned,
				"zone", "pages_scanned");
			ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area,
				"zone.free_area", NULL, SIZE(free_area));
			vt->dump_free_pages = dump_free_pages_zones_v2;
		}
	} else
		vt->numnodes = 1;

	node_table_init();

	/*
	 * Cache the print width (in hex digits) of the largest possible
	 * physical address for aligned column output.
	 */
	sprintf(buf, "%llx", (ulonglong)
		MAX((uint64_t)vt->max_mapnr * PAGESIZE(),
		machdep->memory_size()));
	vt->paddr_prlen = strlen(buf);

	/* Dispatch table selection based on the detected allocator. */
	if (vt->flags & PERCPU_KMALLOC_V1)
		vt->dump_kmem_cache = dump_kmem_cache_percpu_v1;
	else if (vt->flags & PERCPU_KMALLOC_V2)
		vt->dump_kmem_cache = dump_kmem_cache_percpu_v2;
	else if (vt->flags & KMALLOC_SLUB)
		vt->dump_kmem_cache = dump_kmem_cache_slub;
	else
		vt->dump_kmem_cache = dump_kmem_cache;

	if (!(vt->flags & (NODES|ZONES))) {
		get_array_length("free_area", &dimension, 0);
		if (dimension)
			vt->dump_free_pages = dump_multidimensional_free_pages;
		else
			vt->dump_free_pages = dump_free_pages;
	}

	/* Pre-allocate the vm_area_struct read-back cache. */
	if (!(vt->vma_cache = (char *)malloc(SIZE(vm_area_struct)*VMA_CACHE)))
		error(FATAL, "cannot malloc vm_area_struct cache\n");

	/* Old-kernel page cache hash table, when present. */
	if (symbol_exists("page_hash_bits")) {
		unsigned int page_hash_bits;
		get_symbol_data("page_hash_bits", sizeof(unsigned int),
			&page_hash_bits);
		len = (1 << page_hash_bits);
		builtin_array_length("page_hash_table", len, NULL);
		get_symbol_data("page_hash_table", sizeof(void *),
			&vt->page_hash_table);
		vt->page_hash_table_len = len;
		STRUCT_SIZE_INIT(page_cache_bucket, "page_cache_bucket");
		if (VALID_STRUCT(page_cache_bucket))
			MEMBER_OFFSET_INIT(page_cache_bucket_chain,
				"page_cache_bucket", "chain");
	} else if (symbol_exists("page_hash_table")) {
		vt->page_hash_table = symbol_value("page_hash_table");
		vt->page_hash_table_len = 0;
	} else if (CRASHDEBUG(1))
		error(NOTE, "page_hash_table does not exist in this kernel\n");

	kmem_cache_init();
	page_flags_init();
	rss_page_types_init();

	vt->flags |= VM_INIT;
}

/*
 * This command displays the contents of memory, with the output formatted
 * in several different manners. The starting address may be entered either
 * symbolically or by address. The default output size is the size of a long
 * data type, and the default output format is hexadecimal. When hexadecimal
 * output is used, the output will be accompanied by an ASCII translation.
 * These are the options:
 *
 *      -p  address argument is a physical address.
 *      -u  address argument is a user virtual address.
 *      -d  display output in signed decimal format (default is hexadecimal).
 *      -D  display output in unsigned decimal format (default is hexadecimal).
 *      -s  displays output symbolically when appropriate.
 *      -8  display output in 8-bit values.
 *     -16  display output in 16-bit values.
 *     -32  display output in 32-bit values (default on 32-bit machines).
 *     -64  display output in 64-bit values (default on 64-bit machines).
 *
 *  The default number of items to display is 1, but a count argument, if any,
 *  must follow the address.
*/ void cmd_rd(void) { int c, memtype; ulong flag; long count; ulonglong addr, endaddr; ulong offset; struct syment *sp; FILE *tmpfp; char *outputfile; flag = HEXADECIMAL|DISPLAY_DEFAULT; endaddr = 0; offset = 0; memtype = KVADDR; tmpfp = NULL; outputfile = NULL; count = -1; while ((c = getopt(argcnt, args, "axme:r:pfudDusSNo:81:3:6:")) != EOF) { switch(c) { case 'a': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_ASCII; break; case '8': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_8; break; case '1': if (!STREQ(optarg, "6")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_16; } break; case '3': if (!STREQ(optarg, "2")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_32; } break; case '6': if (!STREQ(optarg, "4")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_64; } break; case 'e': endaddr = htoll(optarg, FAULT_ON_ERROR, NULL); break; case 'r': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_RAW; outputfile = optarg; if ((tmpfp = fopen(outputfile, "w")) == NULL) error(FATAL, "cannot open output file: %s\n", outputfile); set_tmpfile2(tmpfp); break; case 's': case 'S': if (flag & DISPLAY_DEFAULT) { flag |= SYMBOLIC; if (c == 'S') { if (flag & SLAB_CACHE) flag |= SLAB_CACHE2; else flag |= SLAB_CACHE; } } else { error(INFO, "-%c option" " is only allowed with %d-bit display\n", c, DISPLAY_DEFAULT == DISPLAY_64 ? 
64 : 32); argerrs++; } break; case 'o': offset = stol(optarg, FAULT_ON_ERROR, NULL); flag |= SHOW_OFFSET; break; case 'p': memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR); memtype = PHYSADDR; break; case 'u': memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR); memtype = UVADDR; break; case 'd': flag &= ~(HEXADECIMAL|DECIMAL); flag |= DECIMAL; break; case 'D': flag &= ~(HEXADECIMAL|UDECIMAL); flag |= UDECIMAL; break; case 'm': if (!(kt->flags & ARCH_XEN)) error(FATAL, "-m option only applies to xen architecture\n"); memtype &= ~(UVADDR|KVADDR|FILEADDR); memtype = XENMACHADDR; break; case 'f': if (!pc->dumpfile) error(FATAL, "-f option requires a dumpfile\n"); memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR); memtype = FILEADDR; break; case 'x': flag |= NO_ASCII; break; case 'N': flag |= NET_ENDIAN; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (*args[optind] == '(') addr = evall(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) addr = htoll(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) addr = (ulonglong)sp->value; else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); return; } if (flag & SHOW_OFFSET) addr += offset; if (args[++optind]) count = stol(args[optind], FAULT_ON_ERROR, NULL); if (count == -1) { if (endaddr) { long bcnt; if (endaddr <= addr) error(FATAL, "invalid ending address: %llx\n", endaddr); bcnt = endaddr - addr; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: count = bcnt/8; break; case DISPLAY_32: count = bcnt/4; break; case DISPLAY_16: count = bcnt/2; break; case DISPLAY_8: case DISPLAY_ASCII: case DISPLAY_RAW: count = bcnt; break; } if (bcnt == 0) count = 1; } else { if ((flag & DISPLAY_TYPES) == DISPLAY_RAW) error(FATAL, "-r option requires either a count" " argument or the -e option\n"); count = (flag & 
DISPLAY_ASCII) ? ASCII_UNLIMITED : 1; } } else if (endaddr) error(WARNING, "ending address ignored when count is specified\n"); if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII) && !(flag & DISPLAY_ASCII)) flag |= ASCII_ENDLINE; if (memtype == KVADDR) { if (!COMMON_VADDR_SPACE() && !IS_KVADDR(addr)) memtype = UVADDR; } display_memory(addr, count, flag, memtype, outputfile); } /* * display_memory() does the work for cmd_rd(), but can (and is) called by * other routines that want to dump raw data. Based upon the flag, the * output format is tailored to fit in an 80-character line. Hexadecimal * output is accompanied by an end-of-line ASCII translation. */ #define MAX_HEXCHARS_PER_LINE (32) /* line locations where ASCII output starts */ #define ASCII_START_8 (51 + VADDR_PRLEN) #define ASCII_START_16 (43 + VADDR_PRLEN) #define ASCII_START_32 (39 + VADDR_PRLEN) #define ASCII_START_64 (37 + VADDR_PRLEN) #define ENTRIES_8 (16) /* number of entries per line per size */ #define ENTRIES_16 (8) #define ENTRIES_32 (4) #define ENTRIES_64 (2) struct memloc { /* common holder of read memory */ uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64; uint64_t limit64; }; static void display_memory(ulonglong addr, long count, ulong flag, int memtype, void *opt) { int i, a, j; size_t typesz, sz; long written; void *location; char readtype[20]; char *addrtype; struct memloc mem; int displayed, per_line; int hx, lost; char hexchars[MAX_HEXCHARS_PER_LINE+1]; char ch; int linelen; char buf[BUFSIZE]; char slab[BUFSIZE]; int ascii_start; ulong error_handle; char *hex_64_fmt = BITS32() ? "%.*llx " : "%.*lx "; char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld "; char *dec_u64_fmt = BITS32() ? 
"%12llu " : "%20lu "; if (count <= 0) error(FATAL, "invalid count request: %ld\n", count); switch (memtype) { case KVADDR: addrtype = "KVADDR"; break; case UVADDR: addrtype = "UVADDR"; break; case PHYSADDR: addrtype = "PHYSADDR"; break; case XENMACHADDR: addrtype = "XENMACHADDR"; break; case FILEADDR: addrtype = "FILEADDR"; break; default: addrtype = NULL; break; } if (CRASHDEBUG(4)) fprintf(fp, "\n", addr, count, flag, addrtype); if (flag & DISPLAY_RAW) { for (written = 0; written < count; written += sz) { sz = BUFSIZE > (count - written) ? (size_t)(count - written) : (size_t)BUFSIZE; readmem(addr + written, memtype, buf, (long)sz, "raw dump to file", FAULT_ON_ERROR); if (fwrite(buf, 1, sz, pc->tmpfile2) != sz) error(FATAL, "cannot write to: %s\n", (char *)opt); } close_tmpfile2(); fprintf(fp, "%ld bytes copied from 0x%llx to %s\n", count, addr, (char *)opt); return; } BZERO(&mem, sizeof(struct memloc)); hx = lost = linelen = typesz = per_line = ascii_start = 0; location = NULL; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: ascii_start = ASCII_START_64; typesz = SIZEOF_64BIT; location = &mem.u64; sprintf(readtype, "64-bit %s", addrtype); per_line = ENTRIES_64; if (machine_type("IA64")) mem.limit64 = kt->end; break; case DISPLAY_32: ascii_start = ASCII_START_32; typesz = SIZEOF_32BIT; location = &mem.u32; sprintf(readtype, "32-bit %s", addrtype); per_line = ENTRIES_32; break; case DISPLAY_16: ascii_start = ASCII_START_16; typesz = SIZEOF_16BIT; location = &mem.u16; sprintf(readtype, "16-bit %s", addrtype); per_line = ENTRIES_16; break; case DISPLAY_8: ascii_start = ASCII_START_8; typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "8-bit %s", addrtype); per_line = ENTRIES_8; break; case DISPLAY_ASCII: typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "ascii"); per_line = 60; displayed = 0; break; } if (flag & NO_ERROR) error_handle = RETURN_ON_ERROR|QUIET; else error_handle = FAULT_ON_ERROR; for (i = a = 0; i < count; i++) { if(!readmem(addr, 
memtype, location, typesz, readtype, error_handle)) { addr += typesz; lost += 1; continue; } if (!(flag & DISPLAY_ASCII) && (((i - lost) % per_line) == 0)) { if ((i - lost)) { if (flag & ASCII_ENDLINE) { fprintf(fp, " %s", hexchars); } fprintf(fp, "\n"); } fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); hx = 0; BZERO(hexchars, MAX_HEXCHARS_PER_LINE+1); linelen = VADDR_PRLEN + strlen(": "); } switch (flag & DISPLAY_TYPES) { case DISPLAY_64: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if ((!mem.limit64 || (mem.u64 <= mem.limit64)) && in_ksymbol_range(mem.u64) && strlen(value_to_symstr(mem.u64, buf, 0))) { fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u64, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%llx:%s]", (ulonglong)mem.u64, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } } if (flag & HEXADECIMAL) { fprintf(fp, hex_64_fmt, LONG_LONG_PRLEN, mem.u64); linelen += (LONG_LONG_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, dec_64_fmt, mem.u64); else if (flag & UDECIMAL) fprintf(fp, dec_u64_fmt, mem.u64); break; case DISPLAY_32: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if (in_ksymbol_range(mem.u32) && strlen(value_to_symstr(mem.u32, buf, 0))) { fprintf(fp, INT_PRLEN == 16 ? "%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u32, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%x:%s]", mem.u32, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, INT_PRLEN == 16 ? 
"%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } } if (flag & NET_ENDIAN) mem.u32 = htonl(mem.u32); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", INT_PRLEN, mem.u32 ); linelen += (INT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%12d ", mem.u32 ); else if (flag & UDECIMAL) fprintf(fp, "%12u ", mem.u32 ); break; case DISPLAY_16: if (flag & NET_ENDIAN) mem.u16 = htons(mem.u16); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", SHORT_PRLEN, mem.u16); linelen += (SHORT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%5d ", mem.u16); else if (flag & UDECIMAL) fprintf(fp, "%5u ", mem.u16); break; case DISPLAY_8: if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", CHAR_PRLEN, mem.u8); linelen += (CHAR_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%3d ", mem.u8); else if (flag & UDECIMAL) fprintf(fp, "%3u ", mem.u8); break; case DISPLAY_ASCII: if (isprint(mem.u8)) { if ((a % per_line) == 0) { if (displayed && i) fprintf(fp, "\n"); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); } fprintf(fp, "%c", mem.u8); displayed++; a++; } else { if (count == ASCII_UNLIMITED) return; a = 0; } break; } if (flag & HEXADECIMAL) { char* ptr; switch (flag & DISPLAY_TYPES) { case DISPLAY_64: ptr = (char*)&mem.u64; for (j = 0; j < SIZEOF_64BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_32: ptr = (char*)&mem.u32; for (j = 0; j < (SIZEOF_32BIT); j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_16: ptr = (char*)&mem.u16; for (j = 0; j < SIZEOF_16BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_8: ptr = (char*)&mem.u8; for (j = 0; j < SIZEOF_8BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; } } addr += typesz; } if 
((flag & ASCII_ENDLINE) && hx) { pad_line(fp, ascii_start - linelen, ' '); fprintf(fp, " %s", hexchars); } if (lost != count ) fprintf(fp,"\n"); } /* * cmd_wr() is the sister routine of cmd_rd(), used to modify the contents * of memory. Like the "rd" command, the starting address may be entered * either symbolically or by address. The default modification size * is the size of a long data type. Write permission must exist on the * /dev/mem. The flags are similar to those used by rd: * * -p address argument is a physical address. * -u address argument is user virtual address (only if ambiguous). * -k address argument is user virtual address (only if ambiguous). * -8 write data in an 8-bit value. * -16 write data in a 16-bit value. * -32 write data in a 32-bit values (default on 32-bit machines). * -64 write data in a 64-bit values (default on 64-bit machines). * * Only one value of a given datasize may be modified. */ void cmd_wr(void) { int c; ulonglong value; int addr_entered, value_entered; int memtype; struct memloc mem; ulong addr; void *buf; long size; struct syment *sp; if (DUMPFILE()) error(FATAL, "not allowed on dumpfiles\n"); memtype = 0; buf = NULL; addr = 0; size = sizeof(void*); addr_entered = value_entered = FALSE; while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) { switch(c) { case '8': size = 1; break; case '1': if (!STREQ(optarg, "6")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 2; break; case '3': if (!STREQ(optarg, "2")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 4; break; case '6': if (!STREQ(optarg, "4")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 8; break; case 'p': memtype &= ~(UVADDR|KVADDR|FILEADDR); memtype = PHYSADDR; break; case 'u': memtype &= ~(PHYSADDR|KVADDR|FILEADDR); memtype = UVADDR; break; case 'k': memtype &= ~(PHYSADDR|UVADDR|FILEADDR); memtype = KVADDR; break; case 'f': /* * Unsupported, but can be forcibly implemented * 
by removing the DUMPFILE() check above and * recompiling. */ if (!pc->dumpfile) error(FATAL, "-f option requires a dumpfile\n"); memtype &= ~(PHYSADDR|UVADDR|KVADDR); memtype = FILEADDR; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind]) { if (*args[optind] == '(') addr = evall(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) addr = htoll(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) addr = sp->value; else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); return; } addr_entered = TRUE; if (args[++optind]) { value = stol(args[optind], FAULT_ON_ERROR, NULL); value_entered = TRUE; switch (size) { case 1: mem.u8 = (uint8_t)value; buf = (void *)&mem.u8; break; case 2: mem.u16 = (uint16_t)value; buf = (void *)&mem.u16; break; case 4: mem.u32 = (uint32_t)value; buf = (void *)&mem.u32; break; case 8: mem.u64 = (uint64_t)value; buf = (void *)&mem.u64; break; } } } if (!addr_entered || !value_entered) cmd_usage(pc->curcmd, SYNOPSIS); if (!memtype) memtype = vaddr_type(addr, CURRENT_CONTEXT()); switch (memtype) { case UVADDR: if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { error(INFO, "invalid user virtual address: %llx\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (!IS_KVADDR(addr)) { error(INFO, "invalid kernel virtual address: %llx\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } break; case PHYSADDR: break; case FILEADDR: break; case AMBIGUOUS: error(INFO, "ambiguous address: %llx (requires -p, -u or -k)\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } writemem(addr, memtype, buf, size, "write memory", FAULT_ON_ERROR); } char * format_stack_entry(struct bt_info *bt, char *retbuf, ulong value, ulong limit) { char buf[BUFSIZE]; char slab[BUFSIZE]; if (BITS32()) { if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) { if 
((!limit || (value <= limit)) && in_ksymbol_range(value) && strlen(value_to_symstr(value, buf, 0))) sprintf(retbuf, INT_PRLEN == 16 ? "%-16s" : "%-8s", buf); else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) { if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1)) sprintf(buf, "[%lx:%s]", value, slab); else sprintf(buf, "[%s]", slab); sprintf(retbuf, INT_PRLEN == 16 ? "%-16s" : "%-8s", buf); } else sprintf(retbuf, "%08lx", value); } else sprintf(retbuf, "%08lx", value); } else { if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) { if ((!limit || (value <= limit)) && in_ksymbol_range(value) && strlen(value_to_symstr(value, buf, 0))) sprintf(retbuf, "%-16s", buf); else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) { if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1)) sprintf(buf, "[%lx:%s]", value, slab); else sprintf(buf, "[%s]", slab); sprintf(retbuf, "%-16s", buf); } else sprintf(retbuf, "%016lx", value); } else sprintf(retbuf, "%016lx", value); } return retbuf; } /* * For processors with "traditional" kernel/user address space distinction. */ int generic_is_kvaddr(ulong addr) { return (addr >= (ulong)(machdep->kvbase)); } /* * NOTE: Perhaps even this generic version should tighten up requirements * by calling uvtop()? */ int generic_is_uvaddr(ulong addr, struct task_context *tc) { return (addr < (ulong)(machdep->kvbase)); } /* * Raw dump of a task's stack, forcing symbolic output. */ void raw_stack_dump(ulong stackbase, ulong size) { display_memory(stackbase, size/sizeof(ulong), HEXADECIMAL|DISPLAY_DEFAULT|SYMBOLIC, KVADDR, NULL); } /* * Raw data dump, with the option of symbolic output. 
*/ void raw_data_dump(ulong addr, long count, int symbolic) { long wordcnt; ulonglong address; int memtype; switch (sizeof(long)) { case SIZEOF_32BIT: wordcnt = count/SIZEOF_32BIT; if (count % SIZEOF_32BIT) wordcnt++; break; case SIZEOF_64BIT: wordcnt = count/SIZEOF_64BIT; if (count % SIZEOF_64BIT) wordcnt++; break; default: break; } if (pc->curcmd_flags & MEMTYPE_FILEADDR) { address = pc->curcmd_private; memtype = FILEADDR; } else if (pc->curcmd_flags & MEMTYPE_UVADDR) { address = (ulonglong)addr; memtype = UVADDR; } else { address = (ulonglong)addr; memtype = KVADDR; } display_memory(address, wordcnt, HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? SYMBOLIC : ASCII_ENDLINE), memtype, NULL); } /* * Quietly checks the accessibility of a memory location. */ int accessible(ulong kva) { ulong tmp; return(readmem(kva, KVADDR, &tmp, sizeof(ulong), "accessible check", RETURN_ON_ERROR|QUIET)); } /* * readmem() is by far *the* workhorse of this whole program. It reads * memory from /dev/kmem, /dev/mem the dumpfile or /proc/kcore, whichever * is appropriate: * * addr a user, kernel or physical memory address. * memtype addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR * buffer supplied buffer to read the data into. * size number of bytes to read. * type string describing the request -- helpful when the read fails. * error_handle what to do if the read fails: FAULT_ON_ERROR kills the command * immediately; RETURN_ON_ERROR returns FALSE; QUIET suppresses * the error message. 
*/ #define PRINT_ERROR_MESSAGE ((!(error_handle & QUIET) && !STREQ(pc->curcmd, "search")) || \ (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) || CRASHDEBUG(2)) #define INVALID_UVADDR "invalid user virtual address: %llx type: \"%s\"\n" #define INVALID_KVADDR "invalid kernel virtual address: %llx type: \"%s\"\n" #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" #define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n" #define RETURN_ON_PARTIAL_READ() \ if ((error_handle & RETURN_PARTIAL) && (size < orig_size)) { \ if (CRASHDEBUG(1)) \ error(INFO, "RETURN_PARTIAL: \"%s\" read: %ld of %ld\n",\ type, orig_size - size, orig_size); \ return TRUE; \ } int readmem(ulonglong addr, int memtype, void *buffer, long size, char *type, ulong error_handle) { int fd; long cnt, orig_size; physaddr_t paddr; ulonglong pseudo; char *bufptr; if (CRASHDEBUG(4)) fprintf(fp, "\n", addr, memtype_string(memtype, 1), type, size, error_handle_string(error_handle), (ulong)buffer); bufptr = (char *)buffer; orig_size = size; if (size <= 0) { if (PRINT_ERROR_MESSAGE) error(INFO, "invalid size request: %ld type: \"%s\"\n", size, type); goto readmem_error; } fd = REMOTE_MEMSRC() ? pc->sockfd : (ACTIVE() ? pc->mfd : pc->dfd); /* * Screen out any error conditions. 
*/ switch (memtype) { case UVADDR: if (!CURRENT_CONTEXT()) { if (PRINT_ERROR_MESSAGE) error(INFO, "no current user process\n"); goto readmem_error; } if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (LKCD_DUMPFILE()) addr = fix_lkcd_address(addr); if (!IS_KVADDR(addr)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: case XENMACHADDR: break; case FILEADDR: return generic_read_dumpfile(addr, buffer, size, type, error_handle); } while (size > 0) { switch (memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: paddr = addr; break; case XENMACHADDR: pseudo = xen_m2p(addr); if (pseudo == XEN_MACHADDR_NOT_FOUND) { pc->curcmd_flags |= XEN_MACHINE_ADDR; paddr = addr; } else paddr = pseudo | PAGEOFFSET(addr); break; } /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(paddr); if (cnt > size) cnt = size; if (CRASHDEBUG(4)) fprintf(fp, "<%s: addr: %llx paddr: %llx cnt: %ld>\n", readmem_function_name(), addr, (unsigned long long)paddr, cnt); if (memtype == KVADDR) pc->curcmd_flags |= MEMTYPE_KVADDR; else pc->curcmd_flags &= ~MEMTYPE_KVADDR; switch (READMEM(fd, bufptr, cnt, (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 
0 : addr, paddr)) { case SEEK_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; case READ_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); if ((pc->flags & DEVMEM) && (kt->flags & PRE_KERNEL_INIT) && !(error_handle & NO_DEVMEM_SWITCH) && devmem_is_restricted() && switch_to_proc_kcore()) return(readmem(addr, memtype, bufptr, size, type, error_handle)); goto readmem_error; case PAGE_EXCLUDED: RETURN_ON_PARTIAL_READ(); if (PRINT_ERROR_MESSAGE) error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; default: break; } addr += cnt; bufptr += cnt; size -= cnt; } return TRUE; readmem_error: switch (error_handle) { case (FAULT_ON_ERROR): case (QUIET|FAULT_ON_ERROR): if (pc->flags & IN_FOREACH) RESUME_FOREACH(); RESTART(); case (RETURN_ON_ERROR): case (RETURN_PARTIAL|RETURN_ON_ERROR): case (QUIET|RETURN_ON_ERROR): break; } return FALSE; } /* * Accept anything... */ int generic_verify_paddr(physaddr_t paddr) { return TRUE; } /* * Read from /dev/mem. */ int read_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int readcnt; if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } /* * /dev/mem disallows anything >= __pa(high_memory) * * However it will allow 64-bit lseeks to anywhere, and when followed * by pulling a 32-bit address from the 64-bit file position, it * quietly returns faulty data from the (wrapped-around) address. 
*/ if (vt->high_memory && (paddr >= (physaddr_t)(VTOP(vt->high_memory)))) { readcnt = 0; errno = 0; goto try_dev_kmem; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; next_read: errno = 0; readcnt = read(fd, bufptr, cnt); if ((readcnt != cnt) && CRASHDEBUG(4)) { if (errno) perror("/dev/mem"); error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); } try_dev_kmem: /* * On 32-bit intel architectures high memory can can only be accessed * via vmalloc'd addresses. However, /dev/mem returns 0 bytes, and * non-reserved memory pages can't be mmap'd, so the only alternative * is to read it from /dev/kmem. */ if ((readcnt != cnt) && BITS32() && !readcnt && !errno && IS_VMALLOC_ADDR(addr)) readcnt = read_dev_kmem(addr, bufptr, cnt); /* * The 2.6 valid_phys_addr_range() can potentially shorten the * count of a legitimate read request. So far this has only been * seen on an ia64 where a kernel page straddles an EFI segment. */ if ((readcnt != cnt) && readcnt && (machdep->flags & DEVMEMRD) && !errno) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); cnt -= readcnt; bufptr += readcnt; goto next_read; } if (readcnt != cnt) return READ_ERROR; return readcnt; } /* * Write to /dev/mem. */ int write_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return WRITE_ERROR; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; if (write(fd, bufptr, cnt) != cnt) return WRITE_ERROR; return cnt; } /* * The first required reads of memory are done in kernel_init(), * so if there's a fatal read error of /dev/mem, display a warning * message if it appears that CONFIG_STRICT_DEVMEM is in effect. 
 * On x86 and x86_64, only the first 256 pages of physical memory
 * are accessible:
 *
 *   #ifdef CONFIG_STRICT_DEVMEM
 *   int devmem_is_allowed(unsigned long pagenr)
 *   {
 *       if (pagenr <= 256)
 *           return 1;
 *       if (!page_is_ram(pagenr))
 *           return 1;
 *       return 0;
 *   }
 *   #endif
 *
 * It would probably suffice to simply check for the existence of
 * devmem_is_allowed(), but on x86 and x86_64 verify pfn 256 reads OK,
 * and 257 fails.
 *
 * Update: a patch has been posted to LKML to fix the off-by-one error
 * by changing "<= 256" to "< 256":
 *
 *   https://lkml.org/lkml/2012/8/28/357
 *
 * The X86/X86_64 lower-boundary pfn check below has been changed
 * (preemptively) from 256 to 255.
 *
 * In any case, if that x86/x86_64 check fails to prove CONFIG_STRICT_DEVMEM
 * is configured, then the function will check that "jiffies" can be read,
 * as is done for the other architectures.
 *
 */
static int
devmem_is_restricted(void)
{
	long tmp;
	int restricted;

	/*
	 * Check for pre-CONFIG_STRICT_DEVMEM kernels.
	 */
	if (!kernel_symbol_exists("devmem_is_allowed")) {
		if (machine_type("ARM") || machine_type("ARM64") ||
		    machine_type("X86") || machine_type("X86_64") ||
		    machine_type("PPC") || machine_type("PPC64"))
			return FALSE;
	}

	restricted = FALSE;

	if (STREQ(pc->live_memsrc, "/dev/mem")) {
		/* x86/x86_64 probe: pfn 255 must read OK while pfn 257
		 * fails -- the signature of devmem_is_allowed() */
		if (machine_type("X86") || machine_type("X86_64")) {
			if (readmem(255*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 255",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH) &&
			    !(readmem(257*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 257",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH)))
				restricted = TRUE;
		}
		/* generic probe: "jiffies" should always be readable on
		 * an unrestricted live system */
		if (kernel_symbol_exists("jiffies") &&
		    !readmem(symbol_value("jiffies"), KVADDR, &tmp,
		    sizeof(ulong), "devmem_is_allowed - jiffies",
		    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH))
			restricted = TRUE;

		if (restricted)
			error(INFO,
			    "this kernel may be configured with CONFIG_STRICT_DEVMEM,"
			    " which\n renders /dev/mem unusable as a live memory "
			    "source.\n");
	}

	return restricted;
}

/*
 * Close the failed /dev/mem descriptor and re-point all live-memory
 * access at /proc/kcore instead.  Returns TRUE on success.
 */
static int
switch_to_proc_kcore(void)
{
	close(pc->mfd);

	if (file_exists("/proc/kcore", NULL))
		error(INFO, "trying /proc/kcore as an alternative to /dev/mem\n\n");
	else
		return FALSE;

	if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0) {
		error(INFO, "/proc/kcore: %s\n", strerror(errno));
		return FALSE;
	}
	if (!proc_kcore_init(fp)) {
		error(INFO, "/proc/kcore: initialization failed\n");
		return FALSE;
	}

	/* swap the memory-source plumbing over to /proc/kcore */
	pc->flags &= ~DEVMEM;
	pc->flags |= PROC_KCORE;
	pc->readmem = read_proc_kcore;
	pc->writemem = write_proc_kcore;
	pc->live_memsrc = "/proc/kcore";

	return TRUE;
}

/*
 * Read from memory driver.
 */
int
read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	/* the crash memory driver cannot service xen machine addresses */
	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
		return READ_ERROR;

	if (!machdep->verify_paddr(paddr)) {
		if (CRASHDEBUG(1))
			error(INFO, "verify_paddr(%lx) failed\n", paddr);
		return READ_ERROR;
	}

	/* NOTE(review): lseek() result is unchecked here, unlike the
	 * write path below -- presumably a seek failure surfaces as a
	 * short read */
	lseek(fd, (loff_t)paddr, SEEK_SET);

	if (read(fd, bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 * Write to memory driver.
 */
int
write_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	/* refuse unless the driver device node was created writable */
	if (!(MEMORY_DRIVER_DEVICE_MODE & S_IWUSR))
		return (error(FATAL, "cannot write to %s!\n", pc->live_memsrc));

	if (lseek(fd, (loff_t)paddr, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(fd, bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 * Read from an MCLX formatted dumpfile.
 */
int
read_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_read((void *)bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 * Write to an MCLX formatted dumpfile.  This only modifies the buffered
 * copy only; if it gets flushed, the modification is lost.
 */
int
write_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_write((void *)bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 * Read from an LKCD formatted dumpfile.
*/ int read_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { set_lkcd_fp(fp); if (!lkcd_lseek(paddr)) return SEEK_ERROR; if (lkcd_read((void *)bufptr, cnt) != cnt) return READ_ERROR; return cnt; } /* * Write to an LKCD formatted dumpfile. (dummy routine -- not allowed) */ int write_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return (error(FATAL, "cannot write to an LKCD compressed dump!\n")); } /* * Read from network daemon. */ int read_daemon(int fd, void *bufptr, int cnt, ulong vaddr, physaddr_t paddr) { if (remote_memory_read(pc->rmfd, bufptr, cnt, paddr, -1) == cnt) return cnt; if (!IS_VMALLOC_ADDR(vaddr) || DUMPFILE()) return READ_ERROR; /* * On 32-bit architectures w/memory above ~936MB, * that memory can only be accessed via vmalloc'd * addresses. However, /dev/mem returns 0 bytes, * and non-reserved memory pages can't be mmap'd, so * the only alternative is to read it from /dev/kmem. */ if (BITS32() && remote_memory_read(pc->rkfd, bufptr, cnt, vaddr, -1) == cnt) return cnt; return READ_ERROR; } /* * Write to network daemon. */ int write_daemon(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return (error(FATAL, "writing to daemon not supported yet [TBD]\n")); } /* * Turn the memtype bitmask into a string. */ static char *memtype_string(int memtype, int debug) { static char membuf[40]; switch (memtype) { case UVADDR: sprintf(membuf, debug ? "UVADDR" : "user virtual"); break; case KVADDR: sprintf(membuf, debug ? "KVADDR" : "kernel virtual"); break; case PHYSADDR: sprintf(membuf, debug ? "PHYSADDR" : "physical"); break; case XENMACHADDR: sprintf(membuf, debug ? "XENMACHADDR" : "xen machine"); break; case FILEADDR: sprintf(membuf, debug ? 
"FILEADDR" : "dumpfile"); break; default: if (debug) sprintf(membuf, "0x%x (?)", memtype); else sprintf(membuf, "unknown"); break; } return membuf; } /* * Turn the error_handle bitmask into a string, * Note: FAULT_ON_ERROR == 0 */ static char *error_handle_string(ulong error_handle) { static char ebuf[20]; int others; sprintf(ebuf, "("); others = 0; if (error_handle & RETURN_ON_ERROR) sprintf(&ebuf[strlen(ebuf)], "%sROE", others++ ? "|" : ""); if (error_handle & FAULT_ON_ERROR) sprintf(&ebuf[strlen(ebuf)], "%sFOE", others++ ? "|" : ""); if (error_handle & QUIET) sprintf(&ebuf[strlen(ebuf)], "%sQ", others++ ? "|" : ""); if (error_handle & HEX_BIAS) sprintf(&ebuf[strlen(ebuf)], "%sHB", others++ ? "|" : ""); if (error_handle & RETURN_PARTIAL) sprintf(&ebuf[strlen(ebuf)], "%sRP", others++ ? "|" : ""); if (error_handle & NO_DEVMEM_SWITCH) sprintf(&ebuf[strlen(ebuf)], "%sNDS", others++ ? "|" : ""); strcat(ebuf, ")"); return ebuf; } /* * Sister routine to readmem(). */ int writemem(ulonglong addr, int memtype, void *buffer, long size, char *type, ulong error_handle) { int fd; long cnt; physaddr_t paddr; char *bufptr; if (CRASHDEBUG(1)) fprintf(fp, "writemem: %llx, %s, \"%s\", %ld, %s %lx\n", addr, memtype_string(memtype, 1), type, size, error_handle_string(error_handle), (ulong)buffer); if (size < 0) { if (PRINT_ERROR_MESSAGE) error(INFO, "invalid size request: %ld\n", size); goto writemem_error; } bufptr = (char *)buffer; fd = ACTIVE() ? pc->mfd : pc->dfd; /* * Screen out any error conditions. 
 */
	switch (memtype)
	{
	case UVADDR:
		if (!CURRENT_CONTEXT()) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, "no current user process\n");
			goto writemem_error;
		}
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_UVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case KVADDR:
		if (!IS_KVADDR(addr)) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_KVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case PHYSADDR:
		break;

	/* FILEADDR writes bypass the paging loop entirely. */
	case FILEADDR:
		return generic_write_dumpfile(addr, buffer, size, type, error_handle);
	}

	/* Write page-by-page, translating virtual addresses as we go. */
	while (size > 0) {
		switch (memtype)
		{
		case UVADDR:
			if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_UVADDR, addr, type);
				goto writemem_error;
			}
			break;

		case KVADDR:
			if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_KVADDR, addr, type);
				goto writemem_error;
			}
			break;

		case PHYSADDR:
			paddr = addr;
			break;
		}

		/*
		 *  Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(paddr);

		if (cnt > size)
			cnt = size;

		/* Delegate the actual write to the installed plug-in. */
		switch (pc->writemem(fd, bufptr, cnt, addr, paddr))
		{
		case SEEK_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, SEEK_ERRMSG, memtype_string(memtype, 0), addr, type);
			goto writemem_error;
		case WRITE_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, WRITE_ERRMSG, memtype_string(memtype, 0), addr, type);
			goto writemem_error;
		default:
			break;
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}

	return TRUE;

writemem_error:
	switch (error_handle)
	{
	/* NOTE(review): RESTART() appears not to return, so the fall
	   through to the RETURN_ON_ERROR cases looks intentional --
	   confirm against RESTART()'s definition. */
	case (FAULT_ON_ERROR):
	case (QUIET|FAULT_ON_ERROR):
		RESTART();

	case (RETURN_ON_ERROR):
	case (QUIET|RETURN_ON_ERROR):
		break;
	}

	return FALSE;
}

/*
 *  When /dev/mem won't allow access, try /dev/kmem.
 */
static ssize_t
read_dev_kmem(ulong vaddr, char *bufptr, long cnt)
{
	ssize_t readcnt;

	/* Open /dev/kmem lazily on first use; descriptor is cached in pc. */
	if (pc->kfd < 0) {
		if ((pc->kfd = open("/dev/kmem", O_RDONLY)) < 0)
			return 0;
	}

	if (lseek(pc->kfd, vaddr, SEEK_SET) == -1)
		return 0;

	/* All-or-nothing: a short read is reported as 0 bytes. */
	readcnt = read(pc->kfd, bufptr, cnt);
	if (readcnt != cnt)
		readcnt = 0;

	return readcnt;
}

/*
 *  Generic dumpfile read/write functions to handle FILEADDR
 *  memtype arguments to readmem() and writemem().  These are
 *  not to be confused with pc->readmem/writemem plug-ins.
 *
 *  The dumpfile is opened and closed per call; addr is a raw file
 *  offset.  Returns TRUE on success, FALSE on seek/transfer failure.
 */
static int
generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type,
	ulong error_handle)
{
	int fd;
	int retval;

	retval = TRUE;

	if (!pc->dumpfile)
		error(FATAL, "command requires a dumpfile\n");

	if ((fd = open(pc->dumpfile, O_RDONLY)) < 0)
		error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno));

	if (lseek(fd, addr, SEEK_SET) == -1) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	} else if (read(fd, buffer, size) != size) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, READ_ERRMSG, memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	}

	close(fd);

	return retval;
}

/* Write counterpart of generic_read_dumpfile(); same conventions. */
static int
generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type,
	ulong error_handle)
{
	int fd;
	int retval;

	retval = TRUE;

	if (!pc->dumpfile)
		error(FATAL, "command requires a dumpfile\n");

	if ((fd = open(pc->dumpfile, O_WRONLY)) < 0)
		error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno));

	if (lseek(fd, addr, SEEK_SET) == -1) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	} else if (write(fd, buffer, size) != size) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, WRITE_ERRMSG, memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	}

	close(fd);

	return retval;
}

/*
 *  Translates a kernel virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 */
int
kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	physaddr_t unused;

	/* Both tc and paddr are optional; fall back to the current
	   context and a throwaway result slot respectively. */
	return (machdep->kvtop(tc ? tc : CURRENT_CONTEXT(), kvaddr,
		paddr ? paddr : &unused, verbose));
}

/*
 *  Translates a user virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 *
 *  This routine can also take mapped kernel virtual addresses if the -u flag
 *  was passed to cmd_vtop().  If so, it makes the translation using the
 *  kernel-memory PGD entry instead of swapper_pg_dir.
 */
int
uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	return(machdep->uvtop(tc, vaddr, paddr, verbose));
}

/*
 *  The vtop command does a verbose translation of a user or kernel virtual
 *  address into it physical address.  The pte translation is shown by
 *  passing the VERBOSE flag to kvtop() or uvtop().  If it's a user virtual
 *  address, the vm_area_struct data containing the page is displayed.
 *  Lastly, the mem_map[] page data containing the address is displayed.
 */
void
cmd_vtop(void)
{
	int c;
	ulong vaddr, context;
	int others;
	ulong vtop_flags, loop_vtop_flags;
	struct task_context *tc;

	vtop_flags = loop_vtop_flags = 0;
	tc = NULL;

	while ((c = getopt(argcnt, args, "ukc:")) != EOF) {
		switch(c)
		{
		case 'c':
			/* -c selects the task whose page tables are used. */
			switch (str_to_context(optarg, &context, &tc))
			{
			case STR_PID:
			case STR_TASK:
				vtop_flags |= USE_USER_PGD;
				break;
			case STR_INVALID:
				error(FATAL, "invalid task or pid value: %s\n", optarg);
				break;
			}
			break;

		case 'u':
			vtop_flags |= UVADDR;
			break;

		case 'k':
			vtop_flags |= KVADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (!tc && !(tc = CURRENT_CONTEXT()))
		error(FATAL, "no current user process\n");

	if ((vtop_flags & (UVADDR|KVADDR)) == (UVADDR|KVADDR))
		error(FATAL, "-u and -k options are mutually exclusive\n");

	others = 0;
	while (args[optind]) {
		vaddr = htol(args[optind], FAULT_ON_ERROR, NULL);

		/* Without -u/-k, classify each address individually. */
		if (!(vtop_flags & (UVADDR|KVADDR))) {
			switch (vaddr_type(vaddr, tc))
			{
			case UVADDR:
				loop_vtop_flags = UVADDR;
				break;
			case KVADDR:
				loop_vtop_flags = KVADDR;
				break;
			case AMBIGUOUS:
				error(FATAL, "ambiguous address: %lx (requires -u or -k)\n", vaddr);
				break;
			}
		} else
			loop_vtop_flags = 0;

		if (others++)
			fprintf(fp, "\n");

		do_vtop(vaddr, tc, vtop_flags | loop_vtop_flags);

		if (REMOTE() && CRASHDEBUG(1)) {
			ulong paddr = remote_vtop(tc->processor, vaddr);
			if (paddr)
				fprintf(fp, "rvtop(%lx)=%lx\n", vaddr, paddr);
		}

		optind++;
	}
}

/*
 *  Do the work for cmd_vtop(), or less likely, foreach().
 */
void
do_vtop(ulong vaddr, struct task_context *tc, ulong vtop_flags)
{
	physaddr_t paddr;
	ulong vma, page;
	int page_exists;
	struct meminfo meminfo;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int memtype = 0;

	/* Resolve the address class from the flags, or classify it here. */
	switch (vtop_flags & (UVADDR|KVADDR))
	{
	case UVADDR:
		memtype = UVADDR;
		break;

	case KVADDR:
		memtype = KVADDR;
		break;

	case (UVADDR|KVADDR):
		error(FATAL, "-u and -k options are mutually exclusive\n");
		break;

	default:
		switch (vaddr_type(vaddr, tc))
		{
		case UVADDR:
			memtype = UVADDR;
			break;
		case KVADDR:
			memtype = KVADDR;
			break;
		case AMBIGUOUS:
			error(FATAL, "ambiguous address: %lx (requires -u or -k)\n", vaddr);
			break;
		}
		break;
	}

	page_exists = paddr = 0;

	switch (memtype)
	{
	case UVADDR:
		fprintf(fp, "%s %s\n",
			mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"),
			mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IN_TASK_VMA(tc->task, vaddr)) {
			fprintf(fp, "%s (not accessible)\n\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)));
			return;
		}
		if (!uvtop(tc, vaddr, &paddr, 0)) {
			fprintf(fp, "%s %s\n\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
				(XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
				"(page not available)" : "(not mapped)");
			page_exists = FALSE;
		} else {
			fprintf(fp, "%s %s\n\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
				mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr)));
			page_exists = TRUE;
		}
		/* Second call with VERBOSE displays the pte translation. */
		uvtop(tc, vaddr, &paddr, VERBOSE);
		fprintf(fp, "\n");
		vma = vm_area_dump(tc->task, UVADDR, vaddr, 0);
		if (!page_exists) {
			/* NOTE(review): on failure uvtop() appears to leave
			   swap/file information in paddr -- confirm against
			   the machdep uvtop implementations. */
			if (swap_location(paddr, buf1))
				fprintf(fp, "\nSWAP: %s\n", buf1);
			else if (vma_file_offset(vma, vaddr, buf1))
				fprintf(fp, "\nFILE: %s\n", buf1);
		}
		break;

	case KVADDR:
		fprintf(fp, "%s %s\n",
			mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
			mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IS_KVADDR(vaddr)) {
			fprintf(fp, "%-8lx (not a kernel virtual address)\n\n", vaddr);
			return;
		}
		if (vtop_flags & USE_USER_PGD) {
			/* -c was given: translate via the task's own PGD. */
			if (!uvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
					mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
					(XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
					"(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
					mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
					mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr)));
				page_exists = TRUE;
			}
			uvtop(tc, vaddr, &paddr, VERBOSE);
		} else {
			if (!kvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
					mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
					(XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
					"(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
					mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
					mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr)));
				page_exists = TRUE;
			}
			kvtop(tc, vaddr, &paddr, VERBOSE);
		}
		break;
	}

	fprintf(fp, "\n");

	/* Finish with the mem_map[] page data for the physical page. */
	if (page_exists && phys_to_page(paddr, &page)) {
		if ((pc->flags & DEVMEM) && (paddr >= VTOP(vt->high_memory)))
			return;
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.flags = ADDRESS_SPECIFIED;
		meminfo.spec_addr = paddr;
		meminfo.memtype = PHYSADDR;
		dump_mem_map(&meminfo);
	}
}

/*
 *  Runs PTOV() on the physical address argument or translates
 *  a per-cpu offset and cpu specifier.
 */
void
cmd_ptov(void)
{
	int c, len, unknown;
	ulong vaddr;
	physaddr_t paddr, paddr_test;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int others;
	char *cpuspec;
	ulong *cpus;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	others = 0;
	cpuspec = NULL;
	cpus = NULL;

	while (args[optind]) {
		/* "addr:cpuspec" selects the per-cpu translation mode. */
		cpuspec = strchr(args[optind], ':');
		if (cpuspec) {
			*cpuspec++ = NULLCHAR;
			cpus = get_cpumask_buf();
			if (STREQ(cpuspec, ""))
				SET_BIT(cpus, CURRENT_CONTEXT()->processor);
			else
				make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
		}

		paddr = htoll(args[optind], FAULT_ON_ERROR, NULL);

		if (cpuspec) {
			/* Size the CPU column by the largest cpu number. */
			sprintf(buf1, "[%d]", kt->cpus-1);
			len = strlen(buf1) + 2;

			fprintf(fp, "%sPER-CPU OFFSET: %llx\n",
				others++ ? "\n" : "", (ulonglong)paddr);
			fprintf(fp, " %s %s\n",
				mkstring(buf1, len, LJUST, "CPU"),
				mkstring(buf2, VADDR_PRLEN, LJUST, "VIRTUAL"));
			for (c = 0; c < kt->cpus; c++) {
				if (!NUM_IN_BITMAP(cpus, c))
					continue;
				vaddr = paddr + kt->__per_cpu_offset[c];
				sprintf(buf1, "[%d]", c);
				fprintf(fp, " %s%lx",
					mkstring(buf2, len, LJUST, buf1), vaddr);
				if (hide_offline_cpu(c))
					fprintf(fp, " [OFFLINE]\n");
				else
					fprintf(fp, "\n");
			}
			FREEBUF(cpus);
		} else {
			vaddr = PTOV(paddr);

			/* On 32-bit, verify the unity-map round trip. */
			unknown = BITS32() &&
			    (!kvtop(0, vaddr, &paddr_test, 0) || (paddr_test != paddr));

			fprintf(fp, "%s%s %s\n", others++ ? "\n" : "",
				mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
				mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));
			fprintf(fp, "%s %s\n",
				unknown ?
				mkstring(buf1, VADDR_PRLEN, LJUST, "unknown") :
				mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)),
				mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr)));
		}

		optind++;
	}
}

/*
 *  Runs PTOB() on the page frame number to get the page address.
 */
void
cmd_ptob(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = stoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, PTOB(value));
		optind++;
	}
}

/*
 *  Runs BTOP() on the address to get the page frame number.
 */
void
cmd_btop(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = htoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, BTOP(value));
		optind++;
	}
}

/*
 *  This command displays basic virtual memory information of a context,
 *  consisting of a pointer to its mm_struct, its RSS and total virtual
 *  memory size; and a list of pointers to each vm_area_struct, its starting
 *  and ending address, and vm_flags value.  The argument can be a task
 *  address or a PID number; if no args, the current context is used.
 */
void
cmd_vm(void)
{
	int c;
	ulong flag;
	ulong value;
	ulong single_vma;
	ulonglong llvalue;
	struct task_context *tc;
	struct reference reference, *ref;
	unsigned int radix;
	int subsequent;

	flag = 0;
	single_vma = 0;
	radix = 0;
	ref = NULL;
	BZERO(&reference, sizeof(struct reference));

	while ((c = getopt(argcnt, args, "f:pmvR:P:xdM:")) != EOF) {
		switch(c)
		{
		case 'M':
			/* -M forces a specific mm_struct address. */
			pc->curcmd_private = htoll(optarg, FAULT_ON_ERROR, NULL);
			pc->curcmd_flags |= MM_STRUCT_FORCE;
			break;

		case 'f':
			/* -f just decodes a vm_flags value and returns. */
			if (flag)
				argerrs++;
			else {
				llvalue = htoll(optarg, FAULT_ON_ERROR, NULL);
				do_vm_flags(llvalue);
				return;
			}
			break;

		case 'p':
			if (flag)
				argerrs++;
			else
				flag |= PHYSADDR;
			break;
		case 'm':
			if (flag)
				argerrs++;
			else
				flag |= PRINT_MM_STRUCT;
			break;
		case 'v':
			if (flag)
				argerrs++;
			else
				flag |= PRINT_VMA_STRUCTS;
			break;

		case 'R':
			if (ref) {
				error(INFO, "only one -R option allowed\n");
				argerrs++;
			} else if (flag && !(flag & PHYSADDR))
				argerrs++;
			else {
				ref = &reference;
				ref->str = optarg;
				flag |= PHYSADDR;
			}
			break;

		case 'P':
			if (flag)
				argerrs++;
			else {
				flag |= PRINT_SINGLE_VMA;
				single_vma = htol(optarg, FAULT_ON_ERROR, NULL);
			}
			break;

		case 'x':
			if (radix == 10)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 16;
			break;

		case 'd':
			if (radix == 16)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 10;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (radix == 10)
		flag |= PRINT_RADIX_10;
	else if (radix == 16)
		flag |= PRINT_RADIX_16;

	/* No task arguments: operate on the current context. */
	if (!args[optind]) {
		if (!ref)
			print_task_header(fp, CURRENT_CONTEXT(), 0);
		vm_area_dump(CURRENT_TASK(), flag, single_vma, ref);
		return;
	}

	subsequent = 0;

	while (args[optind]) {
		switch (str_to_context(args[optind], &value, &tc))
		{
		case STR_PID:
			/* A PID may map to several tasks (threads). */
			for (tc = pid_to_context(value); tc; tc = tc->tc_next) {
				if (!ref)
					print_task_header(fp, tc, subsequent++);
				vm_area_dump(tc->task, flag, single_vma, ref);
			}
			break;

		case STR_TASK:
			if (!ref)
				print_task_header(fp, tc, subsequent++);
			vm_area_dump(tc->task, flag, single_vma, ref);
			break;

		case STR_INVALID:
			error(INFO, "%sinvalid task or pid value: %s\n",
				subsequent++ ? "\n" : "", args[optind]);
			break;
		}

		optind++;
	}
}

/*
 *  Translate a vm_flags value.
 *
 *  NOTE: several bit values below are reused across kernel versions
 *  (e.g. VM_SHM/VM_PFNMAP, VM_GROWSUP/VM_NOHUGEPAGE); do_vm_flags()
 *  disambiguates them via symbol/version checks.
 */

#define VM_READ		0x00000001ULL	/* currently active flags */
#define VM_WRITE	0x00000002ULL
#define VM_EXEC		0x00000004ULL
#define VM_SHARED	0x00000008ULL

#define VM_MAYREAD	0x00000010ULL	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020ULL
#define VM_MAYEXEC	0x00000040ULL
#define VM_MAYSHARE	0x00000080ULL

#define VM_GROWSDOWN	0x00000100ULL	/* general info on the segment */
#define VM_GROWSUP	0x00000200ULL
#define VM_NOHUGEPAGE	0x00000200ULL	/* MADV_NOHUGEPAGE marked this vma */
#define VM_SHM		0x00000400ULL	/* shared memory area, don't swap out */
#define VM_PFNMAP	0x00000400ULL
#define VM_DENYWRITE	0x00000800ULL	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000ULL
#define VM_LOCKED	0x00002000ULL
#define VM_IO		0x00004000ULL	/* Memory mapped I/O or similar */

#define VM_SEQ_READ	0x00008000ULL	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000ULL	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000ULL	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000ULL	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000ULL	/* Don't unmap it from swap_out */
#define VM_BIGPAGE	0x00100000ULL	/* bigpage mappings, no pte's */
#define VM_BIGMAP	0x00200000ULL	/* user wants bigpage mapping */
#define VM_WRITECOMBINED 0x00100000ULL	/* Write-combined */
#define VM_NONCACHED	0x00200000ULL	/* Noncached access */
#define VM_HUGETLB	0x00400000ULL	/* Huge tlb Page*/
#define VM_ACCOUNT	0x00100000ULL	/* Memory is a vm accounted object */
#define VM_NONLINEAR	0x00800000ULL	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000ULL	/* T if mapped copy of data (nommu mmap) */
#define VM_HUGEPAGE	0x01000000ULL	/* MADV_HUGEPAGE marked this vma */
#define VM_INSERTPAGE	0x02000000ULL	/* The vma has had "vm_insert_page()"
					   done on it */
#define VM_ALWAYSDUMP	0x04000000ULL	/* Always include in core dumps */
#define VM_CAN_NONLINEAR 0x08000000ULL	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000ULL	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000ULL	/* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP	0x40000000ULL	/* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE	0x80000000ULL	/* KSM may merge identical pages */

static void
do_vm_flags(ulonglong flags)
{
	int others;

	others = 0;

	fprintf(fp, "%llx: (", flags);

	if (flags & VM_READ) {
		fprintf(fp, "READ");
		others++;
	}
	if (flags & VM_WRITE)
		fprintf(fp, "%sWRITE", others++ ? "|" : "");
	if (flags & VM_EXEC)
		fprintf(fp, "%sEXEC", others++ ? "|" : "");
	if (flags & VM_SHARED)
		fprintf(fp, "%sSHARED", others++ ? "|" : "");
	if (flags & VM_MAYREAD)
		fprintf(fp, "%sMAYREAD", others++ ? "|" : "");
	if (flags & VM_MAYWRITE)
		fprintf(fp, "%sMAYWRITE", others++ ? "|" : "");
	if (flags & VM_MAYEXEC)
		fprintf(fp, "%sMAYEXEC", others++ ? "|" : "");
	if (flags & VM_MAYSHARE)
		fprintf(fp, "%sMAYSHARE", others++ ? "|" : "");
	if (flags & VM_GROWSDOWN)
		fprintf(fp, "%sGROWSDOWN", others++ ? "|" : "");
	/* 0x200 means GROWSUP or NOHUGEPAGE depending on the kernel. */
	if (kernel_symbol_exists("expand_upwards")) {
		if (flags & VM_GROWSUP)
			fprintf(fp, "%sGROWSUP", others++ ? "|" : "");
	} else if (flags & VM_NOHUGEPAGE)
		fprintf(fp, "%sNOHUGEPAGE", others++ ? "|" : "");
	/* 0x400 means PFNMAP or SHM depending on the kernel version. */
	if (flags & VM_SHM) {
		if (THIS_KERNEL_VERSION > LINUX(2,6,17))
			fprintf(fp, "%sPFNMAP", others++ ? "|" : "");
		else
			fprintf(fp, "%sSHM", others++ ? "|" : "");
	}
	if (flags & VM_DENYWRITE)
		fprintf(fp, "%sDENYWRITE", others++ ? "|" : "");
	if (flags & VM_EXECUTABLE)
		fprintf(fp, "%sEXECUTABLE", others++ ? "|" : "");
	if (flags & VM_LOCKED)
		fprintf(fp, "%sLOCKED", others++ ? "|" : "");
	if (flags & VM_IO)
		fprintf(fp, "%sIO", others++ ? "|" : "");
	if (flags & VM_SEQ_READ)
		fprintf(fp, "%sSEQ_READ", others++ ? "|" : "");
	if (flags & VM_RAND_READ)
		fprintf(fp, "%sRAND_READ", others++ ? "|" : "");
	if (flags & VM_DONTCOPY)
		fprintf(fp, "%sDONTCOPY", others++ ? "|" : "");
	if (flags & VM_DONTEXPAND)
		fprintf(fp, "%sDONTEXPAND", others++ ? "|" : "");
	if (flags & VM_RESERVED)
		fprintf(fp, "%sRESERVED", others++ ? "|" : "");
	if (symbol_exists("nr_bigpages") &&
	    (THIS_KERNEL_VERSION == LINUX(2,4,9))) {
		if (flags & VM_BIGPAGE)
			fprintf(fp, "%sBIGPAGE", others++ ? "|" : "");
		if (flags & VM_BIGMAP)
			fprintf(fp, "%sBIGMAP", others++ ? "|" : "");
	} else {
		if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) &&
		    (flags & VM_WRITECOMBINED))
			fprintf(fp, "%sWRITECOMBINED", others++ ? "|" : "");
		if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) &&
		    (flags & VM_NONCACHED))
			fprintf(fp, "%sNONCACHED", others++ ? "|" : "");
		if (flags & VM_HUGETLB)
			fprintf(fp, "%sHUGETLB", others++ ? "|" : "");
		if (flags & VM_ACCOUNT)
			fprintf(fp, "%sACCOUNT", others++ ? "|" : "");
	}
	if (flags & VM_NONLINEAR)
		fprintf(fp, "%sNONLINEAR", others++ ? "|" : "");
	/* 0x1000000 means HUGEPAGE or MAPPED_COPY depending on kernel. */
	if (flags & VM_HUGEPAGE) {
		if (MEMBER_EXISTS("mm_struct", "pmd_huge_pte"))
			fprintf(fp, "%sHUGEPAGE", others++ ? "|" : "");
		else
			fprintf(fp, "%sMAPPED_COPY", others++ ? "|" : "");
	}
	if (flags & VM_INSERTPAGE)
		fprintf(fp, "%sINSERTPAGE", others++ ? "|" : "");
	if (flags & VM_ALWAYSDUMP)
		fprintf(fp, "%sALWAYSDUMP", others++ ? "|" : "");
	if (flags & VM_CAN_NONLINEAR)
		fprintf(fp, "%sCAN_NONLINEAR", others++ ? "|" : "");
	if (flags & VM_MIXEDMAP)
		fprintf(fp, "%sMIXEDMAP", others++ ? "|" : "");
	if (flags & VM_SAO)
		fprintf(fp, "%sSAO", others++ ? "|" : "");
	if (flags & VM_PFN_AT_MMAP)
		fprintf(fp, "%sPFN_AT_MMAP", others++ ? "|" : "");
	if (flags & VM_MERGEABLE)
		fprintf(fp, "%sMERGEABLE", others++ ? "|" : "");

	fprintf(fp, ")\n");
}

/*
 *  Read whatever size vm_area_struct.vm_flags happens to be into a ulonglong.
 */
static ulonglong
get_vm_flags(char *vma_buf)
{
	ulonglong vm_flags = 0;

	/* vm_flags width depends on the target kernel's definition. */
	if (SIZE(vm_area_struct_vm_flags) == sizeof(short))
		vm_flags = USHORT(vma_buf + OFFSET(vm_area_struct_vm_flags));
	else if (SIZE(vm_area_struct_vm_flags) == sizeof(long))
		vm_flags = ULONG(vma_buf + OFFSET(vm_area_struct_vm_flags));
	else if (SIZE(vm_area_struct_vm_flags) == sizeof(long long))
		vm_flags = ULONGLONG(vma_buf + OFFSET(vm_area_struct_vm_flags));
	else
		/* NOTE(review): "%d" assumes SIZE() yields an int-width
		   value -- confirm SIZE()'s return type; "%ld" may be the
		   correct conversion here. */
		error(INFO, "questionable vm_area_struct.vm_flags size: %d\n",
			SIZE(vm_area_struct_vm_flags));

	return vm_flags;
}

/*
 *  Command cleanup handler: undo the temporary mm_struct override
 *  installed by vm_area_dump() for the MM_STRUCT_FORCE case.
 */
static void
vm_cleanup(void *arg)
{
	struct task_context *tc;

	pc->cmd_cleanup = NULL;
	pc->cmd_cleanup_arg = NULL;

	tc = (struct task_context *)arg;
	tc->mm_struct = 0;
}

/*
 *  Sanity-check a forced mm_struct address: it must live in the
 *  mm_struct slab cache and have a non-zero mm_count.  Fails fatally
 *  on a bad address; returns the mm_count otherwise.
 */
static int
is_valid_mm(ulong mm)
{
	char kbuf[BUFSIZE];
	char *p;
	int mm_count;

	if (!(p = vaddr_to_kmem_cache(mm, kbuf, VERBOSE)))
		goto bailout;

	if (!STRNEQ(p, "mm_struct"))
		goto bailout;

	readmem(mm + OFFSET(mm_struct_mm_count), KVADDR, &mm_count,
		sizeof(int), "mm_struct mm_count", FAULT_ON_ERROR);

	if (mm_count == 0)
		error(FATAL, "stale mm_struct address\n");

	return mm_count;

bailout:
	error(FATAL, "invalid mm_struct address\n");
	return 0;
}

/*
 *  vm_area_dump() primarily does the work for cmd_vm(), but is also called
 *  from IN_TASK_VMA(), do_vtop(), and foreach().
 *  How it behaves depends
 *  upon the flag and ref arguments:
 *
 *   UVADDR             do_vtop() when dumping the VMA for a uvaddr
 *   UVADDR|VERIFY_ADDR IN_TASK_VMA() macro checks if a uvaddr is in a VMA
 *   PHYSADDR           cmd_vm() or foreach(vm) for -p and -R options
 *   PRINT_MM_STRUCT    cmd_vm() or foreach(vm) for -m option
 *   PRINT_VMA_STRUCTS  cmd_vm() or foreach(vm) for -v option
 *   PRINT_INODES       open_files_dump() backdoors foreach(vm)
 *
 *   ref                cmd_vm() or foreach(vm) for -R option that searches
 *                      for references -- and only then does a display
 */

/* Print the MM/PGD/RSS/TOTAL_VM summary for the task (uses tm, buf4, buf5
   from the enclosing scope). */
#define PRINT_VM_DATA()                                                  \
{                                                                        \
	fprintf(fp, "%s %s ",                                            \
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "MM"),             \
	    mkstring(buf5, VADDR_PRLEN, CENTER|LJUST, "PGD"));           \
	fprintf(fp, "%s %s\n",                                           \
	    mkstring(buf4, 6, CENTER|LJUST, "RSS"),                      \
	    mkstring(buf5, 8, CENTER|LJUST, "TOTAL_VM"));                \
                                                                         \
	fprintf(fp, "%s %s ",                                            \
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,           \
		MKSTR(tm->mm_struct_addr)),                              \
	    mkstring(buf5, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,           \
		MKSTR(tm->pgd_addr)));                                   \
                                                                         \
	sprintf(buf4, "%ldk", (tm->rss * PAGESIZE())/1024);              \
	sprintf(buf5, "%ldk", (tm->total_vm * PAGESIZE())/1024);         \
	fprintf(fp, "%s %s\n",                                           \
	    mkstring(buf4, 6, CENTER|LJUST, NULL),                       \
	    mkstring(buf5, 8, CENTER|LJUST, NULL));                      \
}

/* Print one VMA line (uses vma, vm_start, vm_end, vm_flags, buf1-buf4). */
#define PRINT_VMA_DATA()                                                 \
	fprintf(fp, "%s%s%s%s%s %6llx%s%s\n",                            \
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)), \
	    space(MINSPACE),                                             \
	    mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), \
	    space(MINSPACE),                                             \
	    mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)), \
	    vm_flags, space(MINSPACE), buf1);

#define FILENAME_COMPONENT(P,C) \
        ((STREQ((P), "/") && STREQ((C), "/")) || \
        (!STREQ((C), "/") && strstr((P),(C))))

/* ref->cmdflags state bits used while searching/displaying -R matches. */
#define VM_REF_SEARCH       (0x1)
#define VM_REF_DISPLAY      (0x2)
#define VM_REF_NUMBER       (0x4)
#define VM_REF_VMA          (0x8)
#define VM_REF_PAGE        (0x10)
#define VM_REF_HEADER      (0x20)
#define DO_REF_SEARCH(X)   ((X) && ((X)->cmdflags & VM_REF_SEARCH))
#define DO_REF_DISPLAY(X)  ((X) && ((X)->cmdflags & VM_REF_DISPLAY))
#define VM_REF_CHECK_HEXVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->hexval == (V)))
#define VM_REF_CHECK_DECVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->decval == (V)))
#define VM_REF_CHECK_STRING(X,S) \
   (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str))
#define VM_REF_FOUND(X)    ((X) && ((X)->cmdflags & VM_REF_HEADER))

ulong
vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref)
{
	struct task_context *tc;
	ulong vma;
	ulong vm_start;
	ulong vm_end;
	ulong vm_next, vm_mm;
	char *dentry_buf, *vma_buf, *file_buf;
	ulonglong vm_flags;
	ulong vm_file, inode;
	ulong dentry, vfsmnt;
	ulong single_vma;
	unsigned int radix;
	int single_vma_found;
	int found;
	struct task_mem_usage task_mem_usage, *tm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	char vma_header[BUFSIZE];

	tc = task_to_context(task);
	tm = &task_mem_usage;
	get_task_mem_usage(task, tm);

	single_vma = 0;
	single_vma_found = FALSE;
	/* For -P, vaddr carries the vm_area_struct address, not a uvaddr. */
	if (flag & PRINT_SINGLE_VMA) {
		single_vma = vaddr;
		vaddr = 0;
	}

	if (flag & PRINT_RADIX_10)
		radix = 10;
	else if (flag & PRINT_RADIX_16)
		radix = 16;
	else
		radix = 0;

	/* Prime the -R reference: numeric strings match as hex/decimal. */
	if (ref) {
		ref->cmdflags = VM_REF_SEARCH;
		if (IS_A_NUMBER(ref->str)) {
			ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL);
			if (decimal(ref->str, 0))
				ref->decval = dtol(ref->str, FAULT_ON_ERROR, NULL);
			ref->cmdflags |= VM_REF_NUMBER;
		}
	}

	if (VM_REF_CHECK_HEXVAL(ref, tm->mm_struct_addr) ||
	    VM_REF_CHECK_HEXVAL(ref, tm->pgd_addr)) {
		print_task_header(fp, tc, 0);
		PRINT_VM_DATA();
		fprintf(fp, "\n");
		return (ulong)NULL;
	}

	if (!(flag & (UVADDR|PRINT_MM_STRUCT|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) &&
	    !DO_REF_SEARCH(ref))
		PRINT_VM_DATA();

	if (!tm->mm_struct_addr) {
		/* Kernel thread: only proceed if -M forced an mm_struct. */
		if (pc->curcmd_flags & MM_STRUCT_FORCE) {
			if (!is_valid_mm(pc->curcmd_private))
				return (ulong)NULL;

			tc->mm_struct = tm->mm_struct_addr = pc->curcmd_private;

			/*
			 *  tc->mm_struct is changed, use vm_cleanup to
			 *  restore it.
			 */
			pc->cmd_cleanup_arg = (void *)tc;
			pc->cmd_cleanup = vm_cleanup;
		} else
			return (ulong)NULL;
	}

	if (flag & PRINT_MM_STRUCT) {
		dump_struct("mm_struct", tm->mm_struct_addr, radix);
		return (ulong)NULL;
	}

	readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR,
		&vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR);

	sprintf(vma_header, "%s%s%s%s%s FLAGS%sFILE\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "VMA"),
		space(MINSPACE),
		mkstring(buf2, UVADDR_PRLEN, CENTER|RJUST, "START"),
		space(MINSPACE),
		mkstring(buf3, UVADDR_PRLEN, CENTER|RJUST, "END"),
		space(MINSPACE));

	if (!(flag & (PHYSADDR|VERIFY_ADDR|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) &&
	    !DO_REF_SEARCH(ref))
		fprintf(fp, "%s", vma_header);

	/* Walk the singly-linked vm_next chain of the task's VMAs. */
	for (found = FALSE; vma; vma = vm_next) {

		if ((flag & PHYSADDR) && !DO_REF_SEARCH(ref))
			fprintf(fp, "%s", vma_header);

		inode = 0;
		BZERO(buf1, BUFSIZE);
		vma_buf = fill_vma_cache(vma);

		vm_mm = ULONG(vma_buf + OFFSET(vm_area_struct_vm_mm));
		vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
		vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));
		vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
		vm_flags = get_vm_flags(vma_buf);
		vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));

		if (flag & PRINT_SINGLE_VMA) {
			if (vma != single_vma)
				continue;
			fprintf(fp, "%s", vma_header);
			single_vma_found = TRUE;
		}

		if (flag & PRINT_VMA_STRUCTS) {
			dump_struct("vm_area_struct", vma, radix);
			continue;
		}

		/* Resolve the mapped file's pathname into buf1. */
		if (vm_file && !(flag & VERIFY_ADDR)) {
			file_buf = fill_file_cache(vm_file);
			dentry = ULONG(file_buf + OFFSET(file_f_dentry));
			dentry_buf = NULL;
			if (dentry) {
				dentry_buf = fill_dentry_cache(dentry);
				if (VALID_MEMBER(file_f_vfsmnt)) {
					vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
					get_pathname(dentry, buf1, BUFSIZE, 1, vfsmnt);
				} else {
					get_pathname(dentry, buf1, BUFSIZE, 1, 0);
				}
			}
			if ((flag & PRINT_INODES) && dentry) {
				inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
			}
		}

		if (!(flag & UVADDR) || ((flag & UVADDR) &&
		    ((vaddr >= vm_start) && (vaddr < vm_end)))) {
			found = TRUE;

			if (flag & VERIFY_ADDR)
				return vma;

			if (DO_REF_SEARCH(ref)) {
				if (VM_REF_CHECK_HEXVAL(ref, vma) ||
				    VM_REF_CHECK_HEXVAL(ref, (ulong)vm_flags) ||
				    VM_REF_CHECK_STRING(ref, buf1)) {
					if (!(ref->cmdflags & VM_REF_HEADER)) {
						print_task_header(fp, tc, 0);
						PRINT_VM_DATA();
						ref->cmdflags |= VM_REF_HEADER;
					}
					if (!(ref->cmdflags & VM_REF_VMA) ||
					    (ref->cmdflags & VM_REF_PAGE)) {
						fprintf(fp, "%s", vma_header);
						ref->cmdflags |= VM_REF_VMA;
						ref->cmdflags &= ~VM_REF_PAGE;
						ref->ref1 = vma;
					}
					PRINT_VMA_DATA();
				}

				/* First pass finds a page match, second
				   pass (VM_REF_DISPLAY) prints it. */
				if (vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref)) {
					if (!(ref->cmdflags & VM_REF_HEADER)) {
						print_task_header(fp, tc, 0);
						PRINT_VM_DATA();
						ref->cmdflags |= VM_REF_HEADER;
					}
					if (!(ref->cmdflags & VM_REF_VMA) ||
					    (ref->ref1 != vma)) {
						fprintf(fp, "%s", vma_header);
						PRINT_VMA_DATA();
						ref->cmdflags |= VM_REF_VMA;
						ref->ref1 = vma;
					}

					ref->cmdflags |= VM_REF_DISPLAY;
					vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref);
					ref->cmdflags &= ~VM_REF_DISPLAY;
				}

				continue;
			}

			if (inode) {
				fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n",
					vma, space(MINSPACE),
					mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)),
					space(MINSPACE),
					mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)),
					space(MINSPACE), vm_flags, space(MINSPACE),
					inode, buf1);
			} else {
				PRINT_VMA_DATA();

				if (flag & (PHYSADDR|PRINT_SINGLE_VMA))
					vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref);
			}

			if (flag & UVADDR)
				return vma;
		}
	}

	if (flag & VERIFY_ADDR)
		return (ulong)NULL;

	if ((flag & PRINT_SINGLE_VMA) && !single_vma_found)
		fprintf(fp, "(not found)\n");

	if ((flag & UVADDR) && !found)
		fprintf(fp, "(not found)\n");

	if (VM_REF_FOUND(ref))
		fprintf(fp, "\n");

	return (ulong)NULL;
}

/*
 *  Dump the per-page VIRTUAL/PHYSICAL translations of a VMA, or -- when
 *  ref is in VM_REF_SEARCH mode -- return TRUE as soon as a page matches
 *  the reference (address, physical, swap or file criteria).
 */
static int
vm_area_page_dump(ulong vma, ulong task, ulong start, ulong end, ulong mm,
	struct reference *ref)
{
	physaddr_t paddr;
	ulong offs;
	char *p1, *p2;
	int display;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

	if (mm == symbol_value("init_mm"))
		return FALSE;

	if (!ref || DO_REF_DISPLAY(ref))
		fprintf(fp, "%s %s\n",
			mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"),
			mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
			LJUST, "PHYSICAL"));

	/* Display pass resumes at the page recorded by the search pass. */
	if (DO_REF_DISPLAY(ref)) {
		start = ref->ref2;
	}

	while (start < end) {
		display = DO_REF_SEARCH(ref) ? FALSE : TRUE;

		if (VM_REF_CHECK_HEXVAL(ref, start)) {
			if (DO_REF_DISPLAY(ref))
				display = TRUE;
			else {
				ref->cmdflags |= VM_REF_PAGE;
				ref->ref2 = start;
				return TRUE;
			}
		}

		if (uvtop(task_to_context(task), start, &paddr, 0)) {
			sprintf(buf3, "%s %s\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)),
				mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
				RJUST|LONGLONG_HEX, MKSTR(&paddr)));

			if (VM_REF_CHECK_HEXVAL(ref, paddr)) {
				if (DO_REF_DISPLAY(ref))
					display = TRUE;
				else {
					ref->cmdflags |= VM_REF_PAGE;
					ref->ref2 = start;
					return TRUE;
				}
			}
		} else if (paddr && swap_location(paddr, buf1)) {
			sprintf(buf3, "%s SWAP: %s\n",
				mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)), buf1);

			if (DO_REF_SEARCH(ref)) {
				if (VM_REF_CHECK_DECVAL(ref,
				    THIS_KERNEL_VERSION >= LINUX(2,6,0) ?
				    __swp_offset(paddr) : SWP_OFFSET(paddr))) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}

				/* Isolate the swap device name for the
				   string comparison. */
				strcpy(buf4, buf3);
				p1 = strstr(buf4, "SWAP:") + strlen("SWAP: ");
				p2 = strstr(buf4, " OFFSET:");
				*p2 = NULLCHAR;
				if (VM_REF_CHECK_STRING(ref, p1)) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}
			}
		} else if (vma_file_offset(vma, start, buf1)) {
			sprintf(buf3, "%s FILE: %s\n",
				mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)), buf1);

			if (DO_REF_SEARCH(ref)) {
				extract_hex(strstr(buf3, "OFFSET:") +
					strlen("OFFSET: "), &offs, 0, 0);

				if (VM_REF_CHECK_HEXVAL(ref, offs)) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}
			}
		} else {
			sprintf(buf3, "%s (not mapped)\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)));
		}

		if (display)
			fprintf(fp, "%s", buf3);

		start += PAGESIZE();
	}

	return FALSE;
}

/*
 *  Cache the passed-in vm_area_struct.
 *
 *  Returns a pointer into the small LRU-ish cache of vm_area_struct
 *  copies, reading the structure from the kernel only on a miss.
 */
char *
fill_vma_cache(ulong vma)
{
	int i;
	char *cache;

	vt->vma_cache_fills++;

	for (i = 0; i < VMA_CACHE; i++) {
		if (vt->cached_vma[i] == vma) {
			vt->cached_vma_hits[i]++;
			cache = vt->vma_cache + (SIZE(vm_area_struct)*i);
			return(cache);
		}
	}

	cache = vt->vma_cache + (SIZE(vm_area_struct)*vt->vma_cache_index);

	readmem(vma, KVADDR, cache, SIZE(vm_area_struct),
		"fill_vma_cache", FAULT_ON_ERROR);

	vt->cached_vma[vt->vma_cache_index] = vma;

	vt->vma_cache_index = (vt->vma_cache_index+1) % VMA_CACHE;

	return(cache);
}

/*
 *  If active, clear the vm_area_struct references.
 *
 *  Only meaningful on a live system, where cached VMA contents can
 *  go stale; dumpfile contents never change.
 */
void
clear_vma_cache(void)
{
	int i;

	if (DUMPFILE())
		return;

	for (i = 0; i < VMA_CACHE; i++) {
		vt->cached_vma[i] = 0;
		vt->cached_vma_hits[i] = 0;
	}

	vt->vma_cache_fills = 0;
	vt->vma_cache_index = 0;
}

/*
 *  Check whether an address is a user stack address based
 *  upon its vm_area_struct flags.
*/ int in_user_stack(ulong task, ulong vaddr) { ulong vma; ulonglong vm_flags; char *vma_buf; if ((vma = vm_area_dump(task, UVADDR|VERIFY_ADDR, vaddr, 0))) { vma_buf = fill_vma_cache(vma); vm_flags = get_vm_flags(vma_buf); if (vm_flags & VM_GROWSDOWN) return TRUE; else if (kernel_symbol_exists("expand_upwards") && (vm_flags & VM_GROWSUP)) return TRUE; /* * per-thread stack */ if ((vm_flags & (VM_READ|VM_WRITE)) == (VM_READ|VM_WRITE)) return TRUE; } return FALSE; } /* * Set the const value of filepages and anonpages * according to MM_FILEPAGES and MM_ANONPAGES. */ static void rss_page_types_init(void) { long anonpages, filepages; if (VALID_MEMBER(mm_struct_rss)) return; if (VALID_MEMBER(mm_struct_rss_stat)) { if (!enumerator_value("MM_FILEPAGES", &filepages) || !enumerator_value("MM_ANONPAGES", &anonpages)) { filepages = 0; anonpages = 1; } tt->filepages = filepages; tt->anonpages = anonpages; } } static struct tgid_context * tgid_quick_search(ulong tgid) { struct tgid_context *last, *next; tt->tgid_searches++; if (!(last = tt->last_tgid)) return NULL; if (tgid == last->tgid) { tt->tgid_cache_hits++; return last; } next = last + 1; if ((next < (tt->tgid_array + RUNNING_TASKS())) && (tgid == next->tgid)) { tt->tgid_cache_hits++; return next; } return NULL; } static void collect_page_member_data(char *optlist, struct meminfo *mi) { int i; int members; char buf[BUFSIZE]; char *memberlist[MAXARGS]; struct struct_member_data *page_member_cache, *pmd; if ((count_chars(optlist, ',')+1) > MAXARGS) error(FATAL, "too many members in comma-separated list\n"); if ((LASTCHAR(optlist) == ',') || (LASTCHAR(optlist) == '.')) error(FATAL, "invalid format: %s\n", optlist); strcpy(buf, optlist); replace_string(optlist, ",", ' '); if (!(members = parse_line(optlist, memberlist))) error(FATAL, "invalid page struct member list format: %s\n", buf); page_member_cache = (struct struct_member_data *) GETBUF(sizeof(struct struct_member_data) * members); for (i = 0, pmd = page_member_cache; i 
< members; i++, pmd++) { pmd->structure = "page"; pmd->member = memberlist[i]; if (!fill_struct_member_data(pmd)) error(FATAL, "invalid %s struct member: %s\n", pmd->structure, pmd->member); if (CRASHDEBUG(1)) { fprintf(fp, " structure: %s\n", pmd->structure); fprintf(fp, " member: %s\n", pmd->member); fprintf(fp, " type: %ld\n", pmd->type); fprintf(fp, " unsigned_type: %ld\n", pmd->unsigned_type); fprintf(fp, " length: %ld\n", pmd->length); fprintf(fp, " offset: %ld\n", pmd->offset); fprintf(fp, " bitpos: %ld\n", pmd->bitpos); fprintf(fp, " bitsize: %ld%s", pmd->bitsize, members > 1 ? "\n\n" : "\n"); } } mi->nr_members = members; mi->page_member_cache = page_member_cache; } static int get_bitfield_data(struct integer_data *bd) { int pos, size; uint32_t tmpvalue32; uint64_t tmpvalue64; uint32_t mask32; uint64_t mask64; struct struct_member_data *pmd; pmd = bd->pmd; pos = bd->pmd->bitpos; size = bd->pmd->bitsize; if (pos == 0 && size == 0) { bd->bitfield_value = bd->value; return TRUE; } switch (__BYTE_ORDER) { case __LITTLE_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 >>= pos; mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 >>= pos; mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; case __BIG_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 <<= pos; tmpvalue32 >>= (32-size); mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 <<= pos; tmpvalue64 >>= (64-size); mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; } return TRUE; } static int show_page_member_data(char *pcache, ulong pp, struct meminfo *mi, char *outputbuffer) { int bufferindex, i, c, cnt, radix, 
struct_intbuf[10]; ulong longbuf, struct_longbuf[10]; unsigned char boolbuf; void *voidptr; ushort shortbuf; struct struct_member_data *pmd; struct integer_data integer_data; bufferindex = 0; pmd = mi->page_member_cache; bufferindex += sprintf(outputbuffer + bufferindex, "%lx ", pp); for (i = 0; i < mi->nr_members; pmd++, i++) { switch (pmd->type) { case TYPE_CODE_PTR: voidptr = VOID_PTR(pcache + pmd->offset); bufferindex += sprintf(outputbuffer + bufferindex, VADDR_PRLEN == 8 ? "%08lx " : "%016lx ", (ulong)voidptr); break; case TYPE_CODE_INT: switch (pmd->length) { case 1: integer_data.value = UCHAR(pcache + pmd->offset); break; case 2: integer_data.value = USHORT(pcache + pmd->offset); break; case 4: integer_data.value = UINT(pcache + pmd->offset); break; case 8: if (BITS32()) goto unsupported; integer_data.value = ULONG(pcache + pmd->offset); break; default: goto unsupported; } integer_data.pmd = pmd; if (get_bitfield_data(&integer_data)) longbuf = integer_data.bitfield_value; else goto unsupported; if (STREQ(pmd->member, "flags")) radix = 16; else if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) radix = 10; else radix = *gdb_output_radix; if (pmd->unsigned_type) { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%lu " : "%016lx ", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%08x ", (uint)longbuf); else if (pmd->length == sizeof(short)) { bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%04x ", (ushort)longbuf); } else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%02x ", (unsigned char)longbuf); } else { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? 
"%ld " : "%016lx", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%08x ", (int)longbuf); else if (pmd->length == sizeof(short)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%04x ", (short)longbuf); else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%02x ", (char)longbuf); } break; case TYPE_CODE_STRUCT: if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%d ", struct_intbuf[0]); } else if ((pmd->length % sizeof(long)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_longbuf[0], pmd->length); cnt = pmd->length / sizeof(long); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, BITS32() ? "%08lx%s" : "%016lx%s", struct_longbuf[c], (c+1) < cnt ? "," : ""); } bufferindex += sprintf(outputbuffer + bufferindex, " "); } else if ((pmd->length % sizeof(int)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); cnt = pmd->length / sizeof(int); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, "%08x%s", struct_intbuf[c], (c+1) < cnt ? "," : ""); } } else if (pmd->length == sizeof(short)) { BCOPY(pcache+pmd->offset, (char *)&shortbuf, pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%04x ", shortbuf); } else goto unsupported; break; case TYPE_CODE_BOOL: radix = *gdb_output_radix; boolbuf = UCHAR(pcache + pmd->offset); if (boolbuf <= 1) bufferindex += sprintf(outputbuffer + bufferindex, "%s ", boolbuf ? "true" : "false"); else bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? 
"%d" : "%x ", boolbuf); break; default: unsupported: error(FATAL, "unsupported page member reference: %s.%s\n", pmd->structure, pmd->member); break; } } return bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } /* * Fill in the task_mem_usage structure with the RSS, virtual memory size, * percent of physical memory being used, and the mm_struct address. */ void get_task_mem_usage(ulong task, struct task_mem_usage *tm) { struct task_context *tc; long rss = 0; BZERO(tm, sizeof(struct task_mem_usage)); if (IS_ZOMBIE(task) || IS_EXITING(task)) return; tc = task_to_context(task); if (!tc || !tc->mm_struct) /* probably a kernel thread */ return; tm->mm_struct_addr = tc->mm_struct; if (!task_mm(task, TRUE)) return; if (VALID_MEMBER(mm_struct_rss)) /* * mm_struct.rss or mm_struct._rss exist. */ tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); else { /* * Latest kernels have mm_struct.mm_rss_stat[]. */ if (VALID_MEMBER(mm_struct_rss_stat)) { long anonpages, filepages; anonpages = tt->anonpages; filepages = tt->filepages; rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (filepages * sizeof(long))); rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (anonpages * sizeof(long))); } /* Check whether SPLIT_RSS_COUNTING is enabled */ if (VALID_MEMBER(task_struct_rss_stat)) { int sync_rss; struct tgid_context tgid, *tgid_array, *tg, *first, *last; tgid_array = tt->tgid_array; tgid.tgid = task_tgid(task); if (!(tg = tgid_quick_search(tgid.tgid))) tg = (struct tgid_context *)bsearch(&tgid, tgid_array, RUNNING_TASKS(), sizeof(struct tgid_context), sort_by_tgid); if (tg) { /* find the first element which has the same tgid */ first = tg; while ((first > tgid_array) && ((first - 1)->tgid == first->tgid)) first--; /* find the last element which have same tgid */ last = tg; while ((last < (tgid_array + (RUNNING_TASKS() - 1))) && (last->tgid == (last + 1)->tgid)) last++; while (first <= last) { /* count 0 -> 
filepages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_FILEPAGES", RETURN_ON_ERROR)) continue; rss += sync_rss; /* count 1 -> anonpages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count) + sizeof(int), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_ANONPAGES", RETURN_ON_ERROR)) continue; rss += sync_rss; if (first == last) break; first++; } tt->last_tgid = last; } } /* * mm_struct._anon_rss and mm_struct._file_rss should exist. */ if (VALID_MEMBER(mm_struct_anon_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_anon_rss)); if (VALID_MEMBER(mm_struct_file_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_file_rss)); tm->rss = (unsigned long)rss; } tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); if (is_kernel_thread(task)) return; tm->pct_physmem = ((double)(tm->rss*100)) / ((double)(MIN(vt->total_pages, vt->num_physpages ? vt->num_physpages : vt->total_pages))); } /* * cmd_kmem() is designed as a multi-purpose kernel memory investigator with * the flag argument sending it off in a multitude of areas. To date, the * following options are defined: * * -f displays the contents of the system free_area[] array headers; * also verifies that the page count equals nr_free_pages * -F same as -f, but also dumps all pages linked to that header. * -p displays basic information about each page in the system * mem_map[] array. * -s displays kmalloc() slab data. * -S same as -s, but displays all kmalloc() objects. * -v displays the vmlist entries. * -c displays the number of pages in the page_hash_table. * -C displays all entries in the page_hash_table. * -i displays informational data shown by /proc/meminfo. 
 * -h hugepage information from hstates[] array
 *
 * -P forces address to be defined as a physical address
 * address when used with -f, the address can be either a page pointer
 *    or a physical address; the free_area header containing the page
 *    (if any) is displayed.
 *    When used with -p, the address can be either a page pointer or a
 *    physical address; its basic mem_map page information is displayed.
 *    When used with -c, the page_hash_table entry containing the
 *    page pointer is displayed.
 */

/* Note: VERBOSE is 0x1, ADDRESS_SPECIFIED is 0x2 */

#define GET_TOTALRAM_PAGES (ADDRESS_SPECIFIED << 1)
#define GET_SHARED_PAGES (ADDRESS_SPECIFIED << 2)
#define GET_FREE_PAGES (ADDRESS_SPECIFIED << 3)
#define GET_FREE_HIGHMEM_PAGES (ADDRESS_SPECIFIED << 4)
#define GET_ZONE_SIZES (ADDRESS_SPECIFIED << 5)
#define GET_HIGHEST (ADDRESS_SPECIFIED << 6)
#define GET_BUFFERS_PAGES (ADDRESS_SPECIFIED << 7)
#define GET_SLAB_PAGES (ADDRESS_SPECIFIED << 8)
#define GET_PHYS_TO_VMALLOC (ADDRESS_SPECIFIED << 9)
#define GET_ACTIVE_LIST (ADDRESS_SPECIFIED << 10)
#define GET_INACTIVE_LIST (ADDRESS_SPECIFIED << 11)
#define GET_INACTIVE_CLEAN (ADDRESS_SPECIFIED << 12) /* obsolete */
#define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */
#define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14)
#define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15)
#define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16)
#define GET_VMLIST (ADDRESS_SPECIFIED << 17)
#define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18)
#define GET_SLUB_SLABS (ADDRESS_SPECIFIED << 19)
#define GET_SLUB_OBJECTS (ADDRESS_SPECIFIED << 20)
#define VMLIST_VERIFY (ADDRESS_SPECIFIED << 21)
#define SLAB_FIRST_NODE (ADDRESS_SPECIFIED << 22)
#define CACHE_SET (ADDRESS_SPECIFIED << 23)
#define SLAB_OVERLOAD_PAGE_PTR (ADDRESS_SPECIFIED << 24)
#define SLAB_BITFIELD (ADDRESS_SPECIFIED << 25)

#define GET_ALL \
	(GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES)

/*
 * The "kmem" command entry point: parse one (mutually-exclusive) option
 * flag plus optional address/name arguments, then dispatch to the
 * appropriate dump routine.  A flag value of 1 after the address loop
 * means "option given with no address arguments".
 */
void
cmd_kmem(void)
{
	int i;
	int c;
	int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag, oflag, gflag;
	int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag, hflag;
	struct meminfo meminfo;
	ulonglong value[MAXARGS];
	char buf[BUFSIZE];
	char *p1;
	int spec_addr, escape;

	spec_addr = 0;
	sflag = Sflag = pflag = fflag = Fflag = Pflag = zflag = oflag = 0;
	vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0;
	gflag = hflag = 0;
	escape = FALSE;
	BZERO(&meminfo, sizeof(struct meminfo));
	BZERO(&value[0], sizeof(ulonglong)*MAXARGS);
	pc->curcmd_flags &= ~HEADER_PRINTED;

	while ((c = getopt(argcnt, args, "gI:sSFfm:pvczCinl:L:PVoh")) != EOF) {
		switch(c)
		{
		case 'V':
			Vflag = 1;
			break;

		case 'n':
			nflag = 1;
			break;

		case 'z':
			zflag = 1;
			break;

		case 'i':
			iflag = 1;
			break;

		case 'h':
			hflag = 1;
			break;

		case 'C':
			Cflag = 1, cflag = 0;;
			break;

		case 'c':
			cflag = 1, Cflag = 0;
			break;

		case 'v':
			vflag = 1;
			break;

		case 's':
			sflag = 1; Sflag = 0;
			break;

		case 'S':
			Sflag = 1; sflag = 0;
			break;

		case 'F':
			Fflag = 1; fflag = 0;
			break;;

		case 'f':
			fflag = 1; Fflag = 0;
			break;;

		case 'p':
			pflag = 1;
			break;

		case 'm':
			/* -m implies -p with a member list */
			pflag = 1;
			collect_page_member_data(optarg, &meminfo);
			break;

		case 'I':
			meminfo.ignore = optarg;
			break;

		case 'l':
			if (STREQ(optarg, "a")) {
				meminfo.flags |= GET_ACTIVE_LIST;
				lflag = 1; Lflag = 0;
			} else if (STREQ(optarg, "i")) {
				meminfo.flags |= GET_INACTIVE_LIST;
				lflag = 1; Lflag = 0;
			} else if (STREQ(optarg, "ic")) {
				meminfo.flags |= GET_INACTIVE_CLEAN;
				lflag = 1; Lflag = 0;
			} else if (STREQ(optarg, "id")) {
				meminfo.flags |= GET_INACTIVE_DIRTY;
				lflag = 1; Lflag = 0;
			} else
				argerrs++;
			break;

		case 'L':
			if (STREQ(optarg, "a")) {
				meminfo.flags |= GET_ACTIVE_LIST;
				Lflag = 1; lflag = 0;
			} else if (STREQ(optarg, "i")) {
				meminfo.flags |= GET_INACTIVE_LIST;
				Lflag = 1; lflag = 0;
			} else if (STREQ(optarg, "ic")) {
				meminfo.flags |= GET_INACTIVE_CLEAN;
				Lflag = 1; lflag = 0;
			} else if (STREQ(optarg, "id")) {
				meminfo.flags |= GET_INACTIVE_DIRTY;
				Lflag = 1; lflag = 0;
			} else
				argerrs++;
			break;

		case 'P':
			Pflag = 1;
			break;

		case 'o':
			oflag = 1;
			break;

		case 'g':
			gflag = 1;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* the main option flags are mutually exclusive */
	if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + oflag +
	    vflag + Cflag + cflag + iflag + lflag + Lflag + gflag + hflag) > 1) {
		error(INFO, "only one flag allowed!\n");
		cmd_usage(pc->curcmd, SYNOPSIS);
	}

	if (sflag || Sflag || !(vt->flags & KMEM_CACHE_INIT))
		kmem_cache_init();

	/* gather hex address arguments and (for -s/-S) a cache name */
	while (args[optind]) {
		if (hexadecimal(args[optind], 0)) {
			value[spec_addr++] =
				htoll(args[optind], FAULT_ON_ERROR, NULL);
		} else {
			if (meminfo.reqname)
				error(FATAL,
					"only one kmem_cache reference is allowed\n");
			meminfo.reqname = args[optind];
			/* leading backslash escapes the literal name "list" */
			if (args[optind][0] == '\\') {
				meminfo.reqname = &args[optind][1];
				escape = TRUE;
			} else
				meminfo.reqname = args[optind];
			if (!sflag && !Sflag)
				cmd_usage(pc->curcmd, SYNOPSIS);
		}

		optind++;
	}

	for (i = 0; i < spec_addr; i++) {
		if (Pflag)
			meminfo.memtype = PHYSADDR;
		else
			meminfo.memtype = IS_KVADDR(value[i]) ?
				KVADDR : PHYSADDR;

		if (fflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			if (meminfo.calls++)
				fprintf(fp, "\n");
			vt->dump_free_pages(&meminfo);
			fflag++;
		}

		if (pflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			dump_mem_map(&meminfo);
			pflag++;
		}

		if (sflag || Sflag) {
			if (vt->flags & KMEM_CACHE_UNAVAIL)
				error(FATAL,
					"kmem cache slab subsystem not available\n");

			meminfo.flags = Sflag ? VERBOSE : 0;

			if (meminfo.memtype == PHYSADDR) {
				if (value[i] < VTOP(vt->high_memory)) {
					value[i] = PTOV(value[i]);
					meminfo.memtype = KVADDR;
				} else
					error(WARNING,
					    "cannot make virtual-to-physical translation: %llx\n",
						value[i]);
			}

			if ((p1 = is_kmem_cache_addr(value[i], buf))) {
				if (meminfo.reqname)
					error(FATAL,
						"only one kmem_cache reference is allowed\n");
				meminfo.reqname = p1;
				meminfo.cache = value[i];
				meminfo.flags |= CACHE_SET;
				if ((i+1) == spec_addr) { /* done? */
					if (meminfo.calls++)
						fprintf(fp, "\n");
					vt->dump_kmem_cache(&meminfo);
				}
				meminfo.flags &= ~CACHE_SET;
			} else {
				meminfo.spec_addr = value[i];
				meminfo.flags = ADDRESS_SPECIFIED;
				if (Sflag && (vt->flags & KMALLOC_SLUB))
					meminfo.flags |= VERBOSE;
				if (meminfo.calls++)
					fprintf(fp, "\n");
				vt->dump_kmem_cache(&meminfo);
			}

			if (sflag)
				sflag++;
			if (Sflag)
				Sflag++;
		}

		if (vflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			dump_vmlist(&meminfo);
			vflag++;
		}

		if (cflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			if (meminfo.calls++)
				fprintf(fp, "\n");
			dump_page_hash_table(&meminfo);
			cflag++;
		}

		if (lflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags |= (ADDRESS_SPECIFIED|VERBOSE);
			if (meminfo.calls++)
				fprintf(fp, "\n");
			dump_page_lists(&meminfo);
			lflag++;
		}

		if (gflag) {
			if (i)
				fprintf(fp, "\n");
			dump_page_flags(value[i]);
			gflag++;
		}

		/*
		 * no value arguments allowed!
		 */
		if (zflag || nflag || iflag || Fflag || Cflag || Lflag ||
		    Vflag || oflag || hflag) {
			error(INFO,
			    "no address arguments allowed with this option\n");
			cmd_usage(pc->curcmd, SYNOPSIS);
		}

		/* no option: search all known memory for the address */
		if (!(sflag + Sflag + pflag + fflag + vflag + cflag +
		    lflag + Lflag + gflag)) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			if (meminfo.calls++)
				fprintf(fp, "\n");
			else
				kmem_cache_init();
			kmem_search(&meminfo);
		}

	}

	/* flag == 1 means the option was given with no address argument */
	if (iflag == 1)
		dump_kmeminfo();

	if (pflag == 1)
		dump_mem_map(&meminfo);

	if (fflag == 1)
		vt->dump_free_pages(&meminfo);

	if (Fflag == 1) {
		meminfo.flags = VERBOSE;
		vt->dump_free_pages(&meminfo);
	}

	if (hflag == 1)
		dump_hstates();

	if (sflag == 1) {
		if (!escape && STREQ(meminfo.reqname, "list"))
			kmem_cache_list();
		else if (vt->flags & KMEM_CACHE_UNAVAIL)
			error(FATAL,
				"kmem cache slab subsystem not available\n");
		else
			vt->dump_kmem_cache(&meminfo);
	}

	if (Sflag == 1) {
		if (STREQ(meminfo.reqname, "list"))
			kmem_cache_list();
		else if (vt->flags & KMEM_CACHE_UNAVAIL)
			error(FATAL,
				"kmem cache slab subsystem not available\n");
		else {
			meminfo.flags = VERBOSE;
			vt->dump_kmem_cache(&meminfo);
		}
	}

	if (vflag == 1)
		dump_vmlist(&meminfo);

	if (Cflag == 1) {
		meminfo.flags = VERBOSE;
		dump_page_hash_table(&meminfo);
	}

	if (cflag == 1)
		dump_page_hash_table(&meminfo);

	if (nflag == 1)
		dump_memory_nodes(MEMORY_NODES_DUMP);

	if (zflag == 1)
		dump_zone_stats();

	if (lflag == 1) {
		dump_page_lists(&meminfo);
	}

	if (Lflag == 1) {
		meminfo.flags |= VERBOSE;
		dump_page_lists(&meminfo);
	}

	if (Vflag == 1) {
		dump_vm_stat(NULL, NULL, 0);
		dump_page_states();
		dump_vm_event_state();
	}

	if (oflag == 1)
		dump_per_cpu_offsets();

	if (gflag == 1)
		dump_page_flags(0);

	if (!(sflag + Sflag + pflag + fflag + Fflag + vflag +
	    Vflag + zflag + oflag + cflag + Cflag + iflag + nflag +
	    lflag + Lflag + gflag + hflag + meminfo.calls))
		cmd_usage(pc->curcmd, SYNOPSIS);
}

/*
 * Determine the PG_reserved bit for this kernel: prefer the pageflags
 * enumerator; otherwise probe the page struct of a known-reserved
 * kernel-text page and derive the bit from its flags.
 */
static void
PG_reserved_flag_init(void)
{
	ulong pageptr;
	int count;
	ulong vaddr, flags;
	char *buf;

	if (enumerator_value("PG_reserved", (long *)&flags)) {
		vt->PG_reserved = 1 << flags;
		if (CRASHDEBUG(2))
			fprintf(fp, "PG_reserved (enum): %lx\n",
				vt->PG_reserved);
		return;
	}

	/* probe the page backing kernel text (always reserved) */
	vaddr = kt->stext ? kt->stext : symbol_value("sys_read");

	if (!phys_to_page((physaddr_t)VTOP(vaddr), &pageptr))
		return;

	buf = (char *)GETBUF(SIZE(page));

	if (!readmem(pageptr, KVADDR, buf, SIZE(page),
	    "reserved page", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(buf);
		return;
	}

	flags = ULONG(buf + OFFSET(page_flags));
	count = INT(buf + OFFSET(page_count));

	if (count_bits_long(flags) == 1)
		vt->PG_reserved = flags;
	else
		vt->PG_reserved = 1 << (ffsl(flags)-1);

	/* a -1 count on a reserved page indicates biased refcounting */
	if (count == -1)
		vt->flags |= PGCNT_ADJ;

	if (CRASHDEBUG(2))
		fprintf(fp,
		    "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n",
			vaddr, pageptr, flags, vt->PG_reserved);

	FREEBUF(buf);
}

/*
 * Determine the PG_slab bit and the compound-page head/tail flag mask
 * for this kernel, from the pageflags enumerator when available, or
 * from version-specific hardwired values / probing otherwise.
 */
static void
PG_slab_flag_init(void)
{
	int bit;
	ulong pageptr;
	ulong vaddr, flags, flags2;
	char buf[BUFSIZE]; /* safe for a page struct */

	/*
	 * Set the old defaults in case all else fails.
	 */
	if (enumerator_value("PG_slab", (long *)&flags)) {
		vt->PG_slab = flags;
		if (CRASHDEBUG(2))
			fprintf(fp, "PG_slab (enum): %lx\n", vt->PG_slab);
	} else if (VALID_MEMBER(page_pte)) {
		if (THIS_KERNEL_VERSION < LINUX(2,6,0))
			vt->PG_slab = 10;
		else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
			vt->PG_slab = 7;
	} else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
		vt->PG_slab = 7;
	} else {
		/* derive the bit from a page known to belong to a slab cache */
		if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) &&
		    phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) &&
		    readmem(pageptr, KVADDR, buf, SIZE(page),
		    "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) {
			flags = ULONG(buf + OFFSET(page_flags));
			if ((bit = ffsl(flags))) {
				vt->PG_slab = bit - 1;
				if (CRASHDEBUG(2))
					fprintf(fp,
					    "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n",
						vaddr, pageptr, flags,
						vt->PG_slab);
			}
		}
	}

	if (VALID_MEMBER(page_compound_head)) {
		if (CRASHDEBUG(2))
			fprintf(fp,
			    "PG_head_tail_mask: (UNUSED): page.compound_head exists!\n");
	} else if (vt->flags & KMALLOC_SLUB) {
		/*
		 * PG_slab and the following are hardwired for
		 * kernels prior to the pageflags enumerator.
		 */
#define PG_compound 14 /* Part of a compound page */
#define PG_reclaim 17 /* To be reclaimed asap */
		vt->PG_head_tail_mask =
			((1L << PG_compound) | (1L << PG_reclaim));

		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
		    enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask =
				((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp, "PG_head_tail_mask: %lx\n",
					vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS) {
			vt->PG_head_tail_mask = 0;
			error(WARNING,
			    "SLUB: cannot determine how compound pages are linked\n\n");
		}
	} else {
		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
		    enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask =
				((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp,
				    "PG_head_tail_mask: %lx (PG_compound|PG_reclaim)\n",
					vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS)
			error(WARNING,
			    "SLAB: cannot determine how compound pages are linked\n\n");
	}

	if (!vt->PG_slab)
		error(INFO, "cannot determine PG_slab bit value\n");
}

/*
 * dump_mem_map() displays basic data about each entry in the mem_map[]
 * array, or if an address is specified, just the mem_map[] entry for that
 * address. Specified addresses can either be physical address or page
 * structure pointers.
 */

/* Page flag bit values */
#define v22_PG_locked 0
#define v22_PG_error 1
#define v22_PG_referenced 2
#define v22_PG_dirty 3
#define v22_PG_uptodate 4
#define v22_PG_free_after 5
#define v22_PG_decr_after 6
#define v22_PG_swap_unlock_after 7
#define v22_PG_DMA 8
#define v22_PG_Slab 9
#define v22_PG_swap_cache 10
#define v22_PG_skip 11
#define v22_PG_reserved 31

#define v24_PG_locked 0
#define v24_PG_error 1
#define v24_PG_referenced 2
#define v24_PG_uptodate 3
#define v24_PG_dirty 4
#define v24_PG_decr_after 5
#define v24_PG_active 6
#define v24_PG_inactive_dirty 7
#define v24_PG_slab 8
#define v24_PG_swap_cache 9
#define v24_PG_skip 10
#define v24_PG_inactive_clean 11
#define v24_PG_highmem 12
#define v24_PG_checked 13 /* kill me in 2.5.. */
#define v24_PG_bigpage 14 /* bits 21-30 unused */
#define v24_PG_arch_1 30
#define v24_PG_reserved 31

#define v26_PG_private 12

/* number of page structs read from the target per readmem() batch */
#define PGMM_CACHED (512)

/*
 * SPARSEMEM variant of dump_mem_map(): walks every valid mem_section,
 * batch-reading PGMM_CACHED page structs at a time, and either displays
 * per-page data or accumulates the counters requested via mi->flags
 * (GET_ALL / GET_*_PAGES / ADDRESS_SPECIFIED lookups).
 */
static void
dump_mem_map_SPARSEMEM(struct meminfo *mi)
{
	ulong i;
	long total_pages;
	int others, page_not_mapped, phys_not_mapped, page_mapping;
	ulong pp, ppend;
	physaddr_t phys, physend;
	ulong tmp, reserved, shared, slabs;
	ulong PG_reserved_flag;
	long buffers;
	ulong inode, offset, flags, mapping, index;
	uint count;
	int print_hdr, pg_spec, phys_spec, done;
	int v22;
	char hdr[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char *page_cache;
	char *pcache;
	ulong section, section_nr, nr_mem_sections, section_size;
	long buffersize;
	char *outputbuffer;
	int bufferindex;

	/* output is buffered and flushed in ~1MB chunks for speed */
	buffersize = 1024 * 1024;
	outputbuffer = GETBUF(buffersize + 512);

	char style1[100];
	char style2[100];
	char style3[100];
	char style4[100];

	/* pre-built per-row format strings, sized to the target's pointer width */
	sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s",
		space(MINSPACE),
		(int)MAX(PADDR_PRLEN, strlen("PHYSICAL")),
		space(MINSPACE),
		VADDR_PRLEN,
		space(MINSPACE),
		space(MINSPACE));
	sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ",
		VADDR_PRLEN,
		space(MINSPACE),
		(int)MAX(PADDR_PRLEN, strlen("PHYSICAL")),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "),
		space(MINSPACE),
		mkstring(buf4, 8, CENTER|RJUST, " "),
		" ");
	sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ",
		VADDR_PRLEN,
		space(MINSPACE),
		(int)MAX(PADDR_PRLEN, strlen("PHYSICAL")),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"),
		space(MINSPACE),
		mkstring(buf4, 8, CENTER|RJUST, "-----"));
	sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ",
		VADDR_PRLEN,
		space(MINSPACE),
		(int)MAX(PADDR_PRLEN, strlen("PHYSICAL")),
		space(MINSPACE),
		VADDR_PRLEN,
		space(MINSPACE));

	v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */

	if (v22) {
		sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n",
			mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"),
			space(MINSPACE),
			mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
			RJUST, "PHYSICAL"),
			space(MINSPACE),
			mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"),
			space(MINSPACE),
			mkstring(buf4, 8, CENTER|LJUST, "OFFSET"),
			space(MINSPACE-1));
	} else if (mi->nr_members) {
		/* custom header listing the requested page members */
		sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"));
		for (i = 0; i < mi->nr_members; i++)
			sprintf(&hdr[strlen(hdr)], " %s",
				mi->page_member_cache[i].member);
		strcat(hdr, "\n");
	} else {
		sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n",
			mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"),
			space(MINSPACE),
			mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
			RJUST, "PHYSICAL"),
			space(MINSPACE),
			mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"),
			space(MINSPACE),
			mkstring(buf4, 8, CENTER|RJUST, "INDEX"));
	}

	mapping = index = 0;
	reserved = shared = slabs = buffers = inode = offset = 0;
	pg_spec = phys_spec = print_hdr = FALSE;

	switch (mi->flags)
	{
	case ADDRESS_SPECIFIED:
		switch (mi->memtype)
		{
		case KVADDR:
			if (is_page_ptr(mi->spec_addr, NULL))
				pg_spec = TRUE;
			else {
				if (kvtop(NULL, mi->spec_addr, &phys, 0)) {
					mi->spec_addr = phys;
					phys_spec = TRUE;
				}
				else
					return;
			}
			break;
		case PHYSADDR:
			phys_spec = TRUE;
			break;
		default:
			error(FATAL, "dump_mem_map: no memtype specified\n");
			break;
		}
		print_hdr = TRUE;
		break;

	case GET_ALL:
		shared = 0;
		reserved = 0;
		buffers = 0;
		slabs = 0;
		break;

	case GET_SHARED_PAGES:
		shared = 0;
		break;

	case GET_TOTALRAM_PAGES:
		reserved = 0;
		break;

	case GET_BUFFERS_PAGES:
		buffers = 0;
		break;

	case GET_SLAB_PAGES:
		slabs = 0;
		break;

	default:
		print_hdr = TRUE;
		break;
	}

	page_cache = GETBUF(SIZE(page) * PGMM_CACHED);
	done = FALSE;
	total_pages = 0;

	nr_mem_sections = NR_MEM_SECTIONS();

	bufferindex = 0;

	/*
	 * Iterate over all possible sections
	 */
	for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) {

		if (CRASHDEBUG(2))
			fprintf(fp, "section_nr = %ld\n", section_nr);

		/*
		 * If we are looking up a specific address, jump directly
		 * to the section with that page
		 */
		if (mi->flags & ADDRESS_SPECIFIED) {
			ulong pfn;
			physaddr_t tmp;

			if (pg_spec) {
				if (!page_to_phys(mi->spec_addr, &tmp))
					return;
				pfn = tmp >> PAGESHIFT();
			} else
				pfn = mi->spec_addr >> PAGESHIFT();
			section_nr = pfn_to_section_nr(pfn);
		}

		if (!(section = valid_section_nr(section_nr))) {
#ifdef NOTDEF
			break; /* On a real sparsemem system we need to check
				* every section as gaps may exist. But this
				* can be slow. If we know we don't have gaps
				* just stop validating sections when we
				* get to the end of the valid ones.
				* In the future find a way to short circuit
				* this loop.
				*/
#endif
			if (mi->flags & ADDRESS_SPECIFIED)
				break;
			continue;
		}

		if (print_hdr) {
			if (!(pc->curcmd_flags & HEADER_PRINTED))
				fprintf(fp, "%s", hdr);
			print_hdr = FALSE;
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		pp = section_mem_map_addr(section);
		pp = sparse_decode_mem_map(pp, section_nr);
		phys = (physaddr_t) section_nr * PAGES_PER_SECTION()
			* PAGESIZE();
		section_size = PAGES_PER_SECTION();

		for (i = 0; i < section_size;
		     i++, pp += SIZE(page), phys += PAGESIZE()) {

			/* refill the batch cache every PGMM_CACHED pages */
			if ((i % PGMM_CACHED) == 0) {
				ppend = pp + ((PGMM_CACHED-1) * SIZE(page));
				physend = phys + ((PGMM_CACHED-1) * PAGESIZE());

				/* skip the whole batch if the target lies beyond it */
				if ((pg_spec && (mi->spec_addr > ppend)) ||
				    (phys_spec &&
				    (PHYSPAGEBASE(mi->spec_addr) > physend))) {
					i += (PGMM_CACHED-1);
					pp = ppend;
					phys = physend;
					continue;
				}

				fill_mem_map_cache(pp, ppend, page_cache);
			}

			pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page));

			if (received_SIGINT())
				restart(0);

			if ((pg_spec && (pp == mi->spec_addr)) ||
			    (phys_spec &&
			    (phys == PHYSPAGEBASE(mi->spec_addr))))
				done = TRUE;

			if (!done && (pg_spec || phys_spec))
				continue;

			if (mi->nr_members) {
				bufferindex += show_page_member_data(pcache, pp,
					mi, outputbuffer+bufferindex);
				goto display_members;
			}

			flags = ULONG(pcache + OFFSET(page_flags));
			if (SIZE(page_flags) == 4)
				flags &= 0xffffffff;
			count = UINT(pcache + OFFSET(page_count));

			switch (mi->flags)
			{
			case GET_ALL:
			case GET_BUFFERS_PAGES:
				if (VALID_MEMBER(page_buffers)) {
					tmp = ULONG(pcache +
						OFFSET(page_buffers));
					if (tmp)
						buffers++;
				} else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
					if ((flags >> v26_PG_private) & 1)
						buffers++;
				} else
					error(FATAL,
			"cannot determine whether pages have buffers\n");

				if (mi->flags != GET_ALL)
					continue;

				/* FALLTHROUGH */

			case GET_SLAB_PAGES:
				if (v22) {
					if ((flags >> v22_PG_Slab) & 1)
						slabs++;
				} else if (vt->PG_slab) {
					if ((flags >> vt->PG_slab) & 1)
						slabs++;
				} else {
					if ((flags >> v24_PG_slab) & 1)
						slabs++;
				}
				if (mi->flags != GET_ALL)
					continue;

				/* FALLTHROUGH */

			case GET_SHARED_PAGES:
			case GET_TOTALRAM_PAGES:
				if (vt->PG_reserved)
					PG_reserved_flag = vt->PG_reserved;
				else
					PG_reserved_flag = v22 ?
						1 << v22_PG_reserved :
						1 << v24_PG_reserved;

				if (flags & PG_reserved_flag) {
					reserved++;
				} else {
					/* PGCNT_ADJ: count is biased by -1 */
					if ((int)count >
					    (vt->flags & PGCNT_ADJ ? 0 : 1))
						shared++;
				}
				continue;
			}

			page_mapping = VALID_MEMBER(page_mapping);

			if (v22) {
				inode = ULONG(pcache + OFFSET(page_inode));
				offset = ULONG(pcache + OFFSET(page_offset));
			} else if (page_mapping) {
				mapping = ULONG(pcache +
					OFFSET(page_mapping));
				index = ULONG(pcache + OFFSET(page_index));
			}

			page_not_mapped = phys_not_mapped = FALSE;

			if (v22) {
				bufferindex += sprintf(outputbuffer+bufferindex,
					(char *)&style1, pp, phys, inode,
					offset, count);
			} else {
				if ((vt->flags & V_MEM_MAP)) {
					if (!machdep->verify_paddr(phys))
						phys_not_mapped = TRUE;
					if (!kvtop(NULL, pp, NULL, 0))
						page_not_mapped = TRUE;
				}
				if (page_not_mapped)
					bufferindex += sprintf(outputbuffer+bufferindex,
						(char *)&style2, pp, phys);
				else if (!page_mapping)
					bufferindex += sprintf(outputbuffer+bufferindex,
						(char *)&style3, pp, phys, count);
				else
					bufferindex += sprintf(outputbuffer+bufferindex,
						(char *)&style4, pp, phys,
						mapping, index, count);
			}

			others = 0;

	/* append a flag name, comma-separated after the first */
#define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? "," : "")

			if (v22) {
				if ((flags >> v22_PG_DMA) & 1)
					bufferindex += sprintflag("%sDMA");
				if ((flags >> v22_PG_locked) & 1)
					bufferindex += sprintflag("%slocked");
				if ((flags >> v22_PG_error) & 1)
					bufferindex += sprintflag("%serror");
				if ((flags >> v22_PG_referenced) & 1)
					bufferindex += sprintflag("%sreferenced");
				if ((flags >> v22_PG_dirty) & 1)
					bufferindex += sprintflag("%sdirty");
				if ((flags >> v22_PG_uptodate) & 1)
					bufferindex += sprintflag("%suptodate");
				if ((flags >> v22_PG_free_after) & 1)
					bufferindex += sprintflag("%sfree_after");
				if ((flags >> v22_PG_decr_after) & 1)
					bufferindex += sprintflag("%sdecr_after");
				if ((flags >> v22_PG_swap_unlock_after) & 1)
					bufferindex += sprintflag("%sswap_unlock_after");
				if ((flags >> v22_PG_Slab) & 1)
					bufferindex += sprintflag("%sslab");
				if ((flags >> v22_PG_swap_cache) & 1)
					bufferindex += sprintflag("%sswap_cache");
				if ((flags >> v22_PG_skip) & 1)
					bufferindex += sprintflag("%sskip");
				if ((flags >> v22_PG_reserved) & 1)
					bufferindex += sprintflag("%sreserved");
				bufferindex += sprintf(outputbuffer+bufferindex, "\n");
			} else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) {
				if (vt->flags & PAGEFLAGS)
					bufferindex += translate_page_flags(
						outputbuffer+bufferindex, flags);
				else
					bufferindex += sprintf(
						outputbuffer+bufferindex,
						"%lx\n", flags);
			} else {
				if ((flags >> v24_PG_locked) & 1)
					bufferindex += sprintflag("%slocked");
				if ((flags >> v24_PG_error) & 1)
					bufferindex += sprintflag("%serror");
				if ((flags >> v24_PG_referenced) & 1)
					bufferindex += sprintflag("%sreferenced");
				if ((flags >> v24_PG_uptodate) & 1)
					bufferindex += sprintflag("%suptodate");
				if ((flags >> v24_PG_dirty) & 1)
					bufferindex += sprintflag("%sdirty");
				if ((flags >> v24_PG_decr_after) & 1)
					bufferindex += sprintflag("%sdecr_after");
				if ((flags >> v24_PG_active) & 1)
					bufferindex += sprintflag("%sactive");
				if ((flags >> v24_PG_inactive_dirty) & 1)
					bufferindex += sprintflag("%sinactive_dirty");
				if ((flags >> v24_PG_slab) & 1)
					bufferindex += sprintflag("%sslab");
				if ((flags >> v24_PG_swap_cache) & 1)
					bufferindex += sprintflag("%sswap_cache");
				if ((flags >> v24_PG_skip) & 1)
					bufferindex += sprintflag("%sskip");
				if ((flags >> v24_PG_inactive_clean) & 1)
					bufferindex += sprintflag("%sinactive_clean");
				if ((flags >> v24_PG_highmem) & 1)
					bufferindex += sprintflag("%shighmem");
				if ((flags >> v24_PG_checked) & 1)
					bufferindex += sprintflag("%schecked");
				if ((flags >> v24_PG_bigpage) & 1)
					bufferindex += sprintflag("%sbigpage");
				if ((flags >> v24_PG_arch_1) & 1)
					bufferindex += sprintflag("%sarch_1");
				if ((flags >> v24_PG_reserved) & 1)
					bufferindex += sprintflag("%sreserved");
				if (phys_not_mapped)
					bufferindex += sprintflag("%s[NOT MAPPED]");
				bufferindex += sprintf(outputbuffer+bufferindex, "\n");
			}

display_members:
			/* flush the output buffer when it fills */
			if (bufferindex > buffersize) {
				fprintf(fp, "%s", outputbuffer);
				bufferindex = 0;
			}

			if (done)
				break;
		}

		if (done)
			break;
	}

	if (bufferindex > 0) {
		fprintf(fp, "%s", outputbuffer);
	}

	switch (mi->flags)
	{
	case GET_TOTALRAM_PAGES:
		mi->retval = total_pages - reserved;
		break;

	case GET_SHARED_PAGES:
		mi->retval = shared;
		break;

	case GET_BUFFERS_PAGES:
		mi->retval = buffers;
		break;

	case GET_SLAB_PAGES:
		mi->retval = slabs;
		break;

	case GET_ALL:
		mi->get_totalram = total_pages - reserved;
		mi->get_shared = shared;
		mi->get_buffers = buffers;
		mi->get_slabs = slabs;
		break;

	case ADDRESS_SPECIFIED:
		mi->retval = done;
		break;
	}

	if (mi->nr_members)
		FREEBUF(mi->page_member_cache);

	FREEBUF(outputbuffer);
	FREEBUF(page_cache);
}

/*
 * FLATMEM/DISCONTIGMEM variant of the mem_map dumper (body continues
 * beyond this chunk; only the declarations are visible here).
 */
static void
dump_mem_map(struct meminfo *mi)
{
	long i, n;
	long total_pages;
	int others, page_not_mapped, phys_not_mapped, page_mapping;
	ulong pp, ppend;
	physaddr_t phys, physend;
	ulong tmp, reserved, shared, slabs;
	ulong PG_reserved_flag;
	long buffers;
	ulong inode, offset, flags, mapping, index;
	ulong node_size;
	uint count;
	int print_hdr, pg_spec, phys_spec, done;
	int v22;
	struct node_table *nt;
	char hdr[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char *page_cache;
	char *pcache;
	long buffersize;
char *outputbuffer; int bufferindex; char style1[100]; char style2[100]; char style3[100]; char style4[100]; if (IS_SPARSEMEM()) { dump_mem_map_SPARSEMEM(mi); return; } buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; bufferindex = 0; for (n = 0; n < vt->numnodes; n++) { if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } nt = &vt->node_table[n]; total_pages += nt->size; pp = nt->mem_map; phys = nt->start_paddr; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; for (i = 0; i < node_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if ((flags >> vt->PG_slab) & 1) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 
1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> 
v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } /* * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the * passed-in buffer. The mem_map array is normally guaranteed to be * readable except in the case of virtual mem_map usage. When V_MEM_MAP * is in place, read all pages consumed by PGMM_CACHED page structures * that are currently mapped, leaving the unmapped ones just zeroed out. */ static void fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) { long size, cnt; ulong addr; char *bufptr; /* * Try to read it in one fell swoop. 
*/
	if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED,
	    "page struct cache", RETURN_ON_ERROR|QUIET))
		return;

	/*
	 * The single read failed: break the request into page-size-or-less
	 * chunks.  Chunks that cannot be read are zeroed out; a warning is
	 * issued unless this is a virtual mem_map (V_MEM_MAP), where
	 * unmapped holes are expected.
	 */
	size = SIZE(page) * PGMM_CACHED;
	addr = pp;
	bufptr = page_cache;

	while (size > 0) {
		/*
		 * Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(addr);

		if (cnt > size)
			cnt = size;

		/*
		 * NOTE(review): the read length below is the full remaining
		 * "size", not the page-bounded "cnt" computed above; only the
		 * buffer/address advance is page-bounded, so a successful
		 * read re-reads the tail on later iterations and a failed
		 * read zeroes everything that remains -- confirm this is the
		 * intended behavior rather than a size/cnt mixup.
		 */
		if (!readmem(addr, KVADDR, bufptr, size,
		    "virtual page struct cache", RETURN_ON_ERROR|QUIET)) {
			BZERO(bufptr, size);
			if (!(vt->flags & V_MEM_MAP) && ((addr+size) < ppend))
				error(WARNING,
				    "mem_map[] from %lx to %lx not accessible\n",
					addr, addr+size);
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}
}

/*
 * Dump the kernel's hstates[] array of huge page pools: for each populated
 * hstate, print its address, huge page size, free and total page counts,
 * and name.  Bails out via option_not_supported('h') when the kernel has
 * no hstates[] array or the hstate structure layout is unrecognized.
 */
static void
dump_hstates()
{
	char *hstate;
	int i, len, order;
	long nr, free;
	ulong vaddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	if (!kernel_symbol_exists("hstates")) {
		error(INFO, "hstates[] array does not exist\n");
		option_not_supported('h');
	}

	if (INVALID_SIZE(hstate) ||
	    INVALID_MEMBER(hstate_order) ||
	    INVALID_MEMBER(hstate_name) ||
	    INVALID_MEMBER(hstate_nr_huge_pages) ||
	    INVALID_MEMBER(hstate_free_huge_pages)) {
		error(INFO, "hstate structure or members have changed\n");
		option_not_supported('h');
	}

	fprintf(fp, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "HSTATE"));
	fprintf(fp, " SIZE FREE TOTAL NAME\n");

	len = get_array_length("hstates", NULL, 0);
	hstate = GETBUF(SIZE(hstate));

	for (i = 0; i < len; i++) {
		vaddr = symbol_value("hstates") + (SIZE(hstate) * i);
		if (!readmem(vaddr, KVADDR, hstate, SIZE(hstate),
		    "hstate", RETURN_ON_ERROR))
			break;

		/* an order of zero marks an unused hstates[] slot */
		order = INT(hstate + OFFSET(hstate_order));
		if (!order)
			continue;

		fprintf(fp, "%lx ", vaddr);

		/* huge page size: 2^order base pages, e.g. "2MB" */
		pages_to_size(1 << order, buf1);
		shift_string_left(first_space(buf1), 1);
		fprintf(fp, "%s ", mkstring(buf2, 5, RJUST, buf1));

		free = LONG(hstate + OFFSET(hstate_free_huge_pages));
		sprintf(buf1, "%ld", free);
		fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1));

		nr = LONG(hstate + OFFSET(hstate_nr_huge_pages));
		sprintf(buf1, "%ld", nr);
		fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1));

		/* hstate.name is an embedded char array */
		fprintf(fp, "%s\n", hstate + OFFSET(hstate_name));
	}
	FREEBUF(hstate);
}

/*
 * Initialize the page-flag name/mask table, preferring the kernel's
 * pageflag_names[] array and falling back to parsing the "pageflags"
 * enum, then resolve the PG_reserved and PG_slab flag bits.
 */
static void
page_flags_init(void)
{
	if (!page_flags_init_from_pageflag_names())
		page_flags_init_from_pageflags_enum();

	PG_reserved_flag_init();
	PG_slab_flag_init();
}

/*
 * Build vt->pageflags_data[] from the kernel's pageflag_names[] array of
 * trace_print_flags { mask, name } entries.  Returns TRUE and sets
 * PAGEFLAGS in vt->flags on success; FALSE when the array or its type
 * information is unavailable or unreadable.
 */
static int
page_flags_init_from_pageflag_names(void)
{
	int i, len;
	char *buffer, *nameptr;
	char namebuf[BUFSIZE];
	ulong mask;
	void *name;

	MEMBER_OFFSET_INIT(trace_print_flags_mask, "trace_print_flags", "mask");
	MEMBER_OFFSET_INIT(trace_print_flags_name, "trace_print_flags", "name");
	STRUCT_SIZE_INIT(trace_print_flags, "trace_print_flags");

	if (INVALID_SIZE(trace_print_flags) ||
	    INVALID_MEMBER(trace_print_flags_mask) ||
	    INVALID_MEMBER(trace_print_flags_name) ||
	    !kernel_symbol_exists("pageflag_names") ||
	    !(len = get_array_length("pageflag_names", NULL, 0)))
		return FALSE;

	buffer = GETBUF(SIZE(trace_print_flags) * len);

	if (!readmem(symbol_value("pageflag_names"), KVADDR, buffer,
	    SIZE(trace_print_flags) * len, "pageflag_names array",
	    RETURN_ON_ERROR)) {
		FREEBUF(buffer);
		return FALSE;
	}

	if (!(vt->pageflags_data = (struct pageflags_data *)
	    malloc(sizeof(struct pageflags_data) * len))) {
		error(INFO, "cannot malloc pageflags_data cache\n");
		FREEBUF(buffer);
		return FALSE;
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "pageflags from pageflag_names: \n");

	for (i = 0; i < len; i++) {
		mask = ULONG(buffer + (SIZE(trace_print_flags)*i) +
			OFFSET(trace_print_flags_mask));
		name = VOID_PTR(buffer + (SIZE(trace_print_flags)*i) +
			OFFSET(trace_print_flags_name));

		/* terminating { -1UL, NULL } sentinel */
		if ((mask == -1UL) && !name) {  /* Linux 3.5 and earlier */
			len--;
			break;
		}

		/*
		 * NOTE(review): the format string below has no conversion
		 * specifiers for the trailing i/name/mask arguments, so
		 * they are silently ignored -- presumably the message was
		 * meant to include them; confirm against upstream.
		 */
		if (!read_string((ulong)name, namebuf, BUFSIZE-1)) {
			error(INFO,
			    "failed to read pageflag_names entry\n",
				i, name, mask);
			goto pageflags_fail;
		}

		if (!(nameptr = (char *)malloc(strlen(namebuf)+1))) {
			error(INFO, "cannot malloc pageflag_names space\n");
			goto pageflags_fail;
		}
		strcpy(nameptr, namebuf);
		vt->pageflags_data[i].name = nameptr;
		vt->pageflags_data[i].mask = mask;

		if (CRASHDEBUG(1)) {
fprintf(fp, " %08lx %s\n", vt->pageflags_data[i].mask, vt->pageflags_data[i].name); } } FREEBUF(buffer); vt->nr_pageflags = len; vt->flags |= PAGEFLAGS; return TRUE; pageflags_fail: FREEBUF(buffer); free(vt->pageflags_data); vt->pageflags_data = NULL; return FALSE; } static int page_flags_init_from_pageflags_enum(void) { int c; int p, len; char *nameptr; char buf[BUFSIZE]; char *arglist[MAXARGS]; if (!(vt->pageflags_data = (struct pageflags_data *) malloc(sizeof(struct pageflags_data) * 32))) { error(INFO, "cannot malloc pageflags_data cache\n"); return FALSE; } p = 0; pc->flags2 |= ALLOW_FP; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (!strstr(buf, " = ")) continue; c = parse_line(buf, arglist); if (strstr(arglist[0], "__NR_PAGEFLAGS")) { len = atoi(arglist[2]); if (!len || (len > 32)) goto enum_fail; vt->nr_pageflags = len; break; } if (!(nameptr = (char *)malloc(strlen(arglist[0])))) { error(INFO, "cannot malloc pageflags name space\n"); goto enum_fail; } strcpy(nameptr, arglist[0] + strlen("PG_")); vt->pageflags_data[p].name = nameptr; vt->pageflags_data[p].mask = 1 << atoi(arglist[2]); p++; } } else goto enum_fail; close_tmpfile(); pc->flags2 &= ~ALLOW_FP; if (CRASHDEBUG(1)) { fprintf(fp, "pageflags from enum: \n"); for (p = 0; p < vt->nr_pageflags; p++) fprintf(fp, " %08lx %s\n", vt->pageflags_data[p].mask, vt->pageflags_data[p].name); } vt->flags |= PAGEFLAGS; return TRUE; enum_fail: close_tmpfile(); pc->flags2 &= ~ALLOW_FP; for (c = 0; c < p; c++) free(vt->pageflags_data[c].name); free(vt->pageflags_data); vt->pageflags_data = NULL; vt->nr_pageflags = 0; return FALSE; } static int translate_page_flags(char *buffer, ulong flags) { char buf[BUFSIZE]; int i, others; sprintf(buf, "%lx", flags); if (flags) { for (i = others = 0; i < vt->nr_pageflags; i++) { if (flags & vt->pageflags_data[i].mask) sprintf(&buf[strlen(buf)], "%s%s", others++ ? 
"," : " ",
				    vt->pageflags_data[i].name);
		}
	}
	strcat(buf, "\n");
	strcpy(buffer, buf);
	return(strlen(buf));
}

/*
 * Display the mem_map data for a single page.
 */
int
dump_inode_page(ulong page)
{
	struct meminfo meminfo;

	/* run dump_mem_map() in ADDRESS_SPECIFIED mode on this one page */
	BZERO(&meminfo, sizeof(struct meminfo));
	meminfo.spec_addr = page;
	meminfo.memtype = KVADDR;
	meminfo.flags = ADDRESS_SPECIFIED;
	dump_mem_map(&meminfo);

	/* TRUE if the page was found and displayed */
	return meminfo.retval;
}

/*
 * dump_page_hash_table() displays the entries in each page_hash_table.
 */
#define PGHASH_CACHED (1024)

static void
dump_page_hash_table(struct meminfo *hi)
{
	int i;
	int len, entry_len;
	ulong page_hash_table, head;
	struct list_data list_data, *ld;
	struct gnu_request req;
	long total_cached;
	long page_cache_size;
	ulong this_addr, searchpage;
	int errflag, found, cnt, populated, verbose;
	uint ival;
	ulong buffer_pages;
	char buf[BUFSIZE];
	char hash_table[BUFSIZE];
	char *pcache, *pghash_cache;

	/*
	 * No page_hash_table symbol: on such kernels only the summary page
	 * cache size can be reported (nr_pagecache minus block device
	 * pages); the verbose and address-search options are unsupported.
	 */
	if (!vt->page_hash_table) {
		if (hi->flags & VERBOSE)
			option_not_supported('C');
		if (symbol_exists("nr_pagecache")) {
			buffer_pages = nr_blockdev_pages();
			get_symbol_data("nr_pagecache", sizeof(int), &ival);
			page_cache_size = (ulong)ival;
			page_cache_size -= buffer_pages;
			fprintf(fp, "page cache size: %ld\n", page_cache_size);
			if (hi->flags & ADDRESS_SPECIFIED)
				option_not_supported('c');
		} else
			option_not_supported('c');
		return;
	}

	ld = &list_data;

	/* a specified address implies a verbose search for that page */
	if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) {
		verbose = TRUE;
		searchpage = hi->spec_addr;
	} else if (hi->flags & VERBOSE) {
		verbose = TRUE;
		searchpage = 0;
	} else {
		verbose = FALSE;
		searchpage = 0;
	}

	if (vt->page_hash_table_len == 0)
		error(FATAL, "cannot determine size of page_hash_table\n");

	page_hash_table = vt->page_hash_table;
	len = vt->page_hash_table_len;
	/* older kernels wrap each chain head in a page_cache_bucket */
	entry_len = VALID_STRUCT(page_cache_bucket) ?
SIZE(page_cache_bucket) : sizeof(void *); populated = 0; if (CRASHDEBUG(1)) fprintf(fp, "page_hash_table length: %d\n", len); get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &ival); page_cache_size = (long)ival; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); if (searchpage) open_tmpfile(); hq_open(); for (i = total_cached = 0; i < len; i++, page_hash_table += entry_len) { if ((i % PGHASH_CACHED) == 0) { readmem(page_hash_table, KVADDR, pghash_cache, entry_len * PGHASH_CACHED, "page hash cache", FAULT_ON_ERROR); } pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); if (VALID_STRUCT(page_cache_bucket)) pcache += OFFSET(page_cache_bucket_chain); head = ULONG(pcache); if (!head) continue; if (verbose) fprintf(fp, "page_hash_table[%d]\n", i); if (CRASHDEBUG(1)) populated++; BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = head; ld->searchfor = searchpage; ld->member_offset = OFFSET(page_next_hash); cnt = do_list(ld); total_cached += cnt; if (ld->searchfor) break; if (received_SIGINT()) restart(0); } hq_close(); fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", page_cache_size); if (page_cache_size != total_cached) fprintf(fp, "(found %ld)\n", total_cached); else fprintf(fp, "(verified)\n"); if (CRASHDEBUG(1)) fprintf(fp, "heads containing page(s): %d\n", populated); if (searchpage) { rewind(pc->tmpfile); found = FALSE; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "retval = TRUE; } } } /* * dump_free_pages() displays basic data about pages currently resident * in the free_area[] memory lists. If the flags contains the VERBOSE * bit, each page slab base address is dumped. If an address is specified * only the free_area[] data containing that page is displayed, along with * the page slab base address. 
Specified addresses can either be physical * address or page structure pointers. */ char *free_area_hdr1 = \ "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr2 = \ "AREA SIZE FREE_AREA_STRUCT\n"; static void dump_free_pages(struct meminfo *fi) { int i; int order; ulong free_area; char *free_area_buf; ulong *pp; int nr_mem_lists; struct list_data list_data, *ld; long cnt, total_free, chunk_size; int nr_free_pages; char buf[BUFSIZE]; char last_free[BUFSIZE]; char last_free_hdr[BUFSIZE]; int verbose, errflag, found; physaddr_t searchphys; ulong this_addr; physaddr_t this_phys; int do_search; ulong kfp, offset; int flen, dimension; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (nr_mem_lists == 0) error(FATAL, "cannot determine size/dimensions of free_area\n"); if (dimension) error(FATAL, "dump_free_pages called with multidimensional free area\n"); ld = &list_data; total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); kfp = free_area = symbol_value("free_area"); flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); readmem(free_area, KVADDR, free_area_buf, SIZE(free_area_struct) * nr_mem_lists, "free_area_struct", FAULT_ON_ERROR); if (do_search) open_tmpfile(); if (!verbose) fprintf(fp, "%s", free_area_hdr1); hq_open(); for (i = 0; i < nr_mem_lists; i++) { pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); chunk_size = power(2, i); if (verbose) fprintf(fp, "%s", free_area_hdr2); fprintf(fp, "%3d ", i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? "\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } hq_close(); fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { order--; fprintf(fp, "%s", last_free_hdr); fprintf(fp, "%s", last_free); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? 
"in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages on kernels with a multi-dimensional free_area array. */ char *free_area_hdr5 = \ " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr6 = \ " AREA SIZE FREE_AREA_STRUCT\n"; static void dump_multidimensional_free_pages(struct meminfo *fi) { int i, j; struct list_data list_data, *ld; long cnt, total_free; ulong kfp, free_area; physaddr_t searchphys; int flen, errflag, verbose, nr_free_pages; int nr_mem_lists, dimension, order, do_search; ulong sum, found, offset; char *free_area_buf, *p; ulong *pp; long chunk_size; ulong this_addr; physaddr_t this_phys; char buf[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ld = &list_data; if (SIZE(free_area_struct) % sizeof(ulong)) error(FATAL, "free_area_struct not long-word aligned?\n"); total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_multidimensional_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (!nr_mem_lists || !dimension) error(FATAL, "cannot determine free_area dimensions\n"); free_area_buf = GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); kfp = free_area = symbol_value("free_area"); readmem(free_area, KVADDR, free_area_buf, (SIZE(free_area_struct) * nr_mem_lists) * dimension, "free_area arrays", FAULT_ON_ERROR); if (do_search) open_tmpfile(); hq_open(); for (i = sum = found = 0; i < dimension; i++) { if (!verbose) fprintf(fp, "%s", free_area_hdr5); pp = (ulong *)(free_area_buf + ((SIZE(free_area_struct)*nr_mem_lists)*i)); for (j = 0; j < nr_mem_lists; j++) { if (verbose) fprintf(fp, "%s", free_area_hdr6); sprintf(buf, "[%d][%d]", i, j); fprintf(fp, "%7s ", buf); chunk_size = power(2, j); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? 
"\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); pp += (SIZE(free_area_struct)/sizeof(ulong)); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } fprintf(fp, "\n"); } hq_close(); fprintf(fp, "nr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages in newer kernels that have zones. This is a work in * progress, because although the framework for memory nodes has been laid * down, complete support has not been put in place. 
*/ static char *zone_hdr = "ZONE NAME SIZE FREE"; static void dump_free_pages_zones_v1(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t this_phys, searchphys; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v1: no memtype specified\n"); } do_search = TRUE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; chunk_size = 0; zone_size_offset = 0; if (VALID_MEMBER(zone_struct_size)) zone_size_offset = OFFSET(zone_struct_size); else if (VALID_MEMBER(zone_struct_memsize)) zone_size_offset = OFFSET(zone_struct_memsize); else error(FATAL, "zone_struct has neither size nor memsize field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone_struct); continue; } if ((i == 0) && (vt->flags & NODES)) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) found += dump_zone_free_area(node_zones+ OFFSET(zone_struct_free_area), vt->nr_free_areas, verbose, NULL); node_zones += SIZE(zone_struct); } } hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); 
strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Callback function for free-list search for a specific page. 
*/ struct free_page_callback_data { physaddr_t searchphys; long block_size; ulong page; int found; }; static int free_page_callback(void *page, void *arg) { struct free_page_callback_data *cbd = arg; physaddr_t this_phys; if (!page_to_phys((ulong)page, &this_phys)) return FALSE; if ((cbd->searchphys >= this_phys) && (cbd->searchphys < (this_phys + cbd->block_size))) { cbd->page = (ulong)page; cbd->found = TRUE; return TRUE; } return FALSE; } /* * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone * and free_area related data structure changes. */ static void dump_free_pages_zones_v2(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t phys, this_phys, searchphys, end_paddr; struct free_page_callback_data callback_data; ulong pp; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_pfn; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v2: no memtype specified\n"); } do_search = TRUE; callback_data.searchphys = searchphys; callback_data.found = FALSE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; zone_size_offset = 0; chunk_size = 0; this_addr = 0; if (VALID_MEMBER(zone_spanned_pages)) zone_size_offset = OFFSET(zone_spanned_pages); else error(FATAL, "zone struct has no spanned_pages field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1) && STREQ(buf, "HighMem")) vt->ZONE_HIGHMEM = i; if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone); continue; } if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); if (VALID_MEMBER(zone_zone_mem_map)) { readmem(node_zones+OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); } readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (!VALID_MEMBER(zone_zone_mem_map)) { if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { zone_mem_map = 0; if (size) { phys = PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (vt->flags & FLATMEM) { zone_mem_map = 0; if (size) zone_mem_map = nt->mem_map + (zone_start_pfn * SIZE(page)); } else error(FATAL, "\ncannot determine zone mem_map: TBD\n"); } if (zone_mem_map) zone_start_mapnr = (zone_mem_map - nt->mem_map) / SIZE(page); else zone_start_mapnr = 0; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) { if (do_search) { end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((searchphys >= nt->start_paddr) && (searchphys < end_paddr)) found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, &callback_data); if 
(callback_data.found) goto done_search; } else found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, NULL); } node_zones += SIZE(zone); } } done_search: hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); 
break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages)", ordinal(offset+1, buf), chunk_size/PAGESIZE()); } fi->retval = TRUE; fprintf(fp, "\n"); } } static char * page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; /* * Display info about the non-free pages in each zone. */ static int dump_zone_page_usage(void) { int i, n; ulong value, node_zones; struct node_table *nt; ulong inactive_dirty_pages, inactive_clean_pages, active_pages; ulong free_pages, pages_min, pages_low, pages_high; char namebuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || !VALID_MEMBER(zone_struct_inactive_clean_pages) || !VALID_MEMBER(zone_struct_active_pages) || !VALID_MEMBER(zone_struct_pages_min) || !VALID_MEMBER(zone_struct_pages_low) || !VALID_MEMBER(zone_struct_pages_high)) return FALSE; fprintf(fp, "\n"); for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); if ((vt->numnodes > 1) && (vt->flags & NODES)) { fprintf(fp, "%sNODE\n %2d\n", n ? 
"\n" : "", nt->node_id); } fprintf(fp, "%s\n", page_usage_hdr); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &free_pages, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_dirty_pages), KVADDR, &inactive_dirty_pages, sizeof(ulong), "node_zones inactive_dirty_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "node_zones inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_active_pages), KVADDR, &active_pages, sizeof(ulong), "node_zones active_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_min), KVADDR, &pages_min, sizeof(ulong), "node_zones pages_min", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_low), KVADDR, &pages_low, sizeof(ulong), "node_zones pages_low", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_high), KVADDR, &pages_high, sizeof(ulong), "node_zones pages_high", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf1, BUFSIZE-1)) sprintf(namebuf, "%-8s", buf1); else sprintf(namebuf, "(unknown)"); sprintf(buf2, "%ld/%ld/%ld", pages_min, pages_low, pages_high); fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n", i, namebuf, free_pages, active_pages, inactive_dirty_pages, inactive_clean_pages, mkstring(buf3, strlen("MIN/LOW/HIGH"), CENTER, buf2)); node_zones += SIZE(zone_struct); } } return TRUE; } /* * Dump the num "order" contents of the zone_t free_area array. 
*/ char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n"; char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; static int dump_zone_free_area(ulong free_area, int num, ulong verbose, struct free_page_callback_data *callback_data) { int i, j; long chunk_size; int flen, total_free, cnt; char buf[BUFSIZE]; ulong free_area_buf[3]; char *free_area_buf2; char *free_list_buf; ulong free_list; struct list_data list_data, *ld; int list_count; ulong *free_ptr; list_count = 0; free_list_buf = free_area_buf2 = NULL; if (VALID_STRUCT(free_area_struct)) { if (SIZE(free_area_struct) != (3 * sizeof(ulong))) error(FATAL, "unrecognized free_area_struct size: %ld\n", SIZE(free_area_struct)); list_count = 1; } else if (VALID_STRUCT(free_area)) { if (SIZE(free_area) == (3 * sizeof(ulong))) list_count = 1; else { list_count = MEMBER_SIZE("free_area", "free_list")/SIZE(list_head); free_area_buf2 = GETBUF(SIZE(free_area)); free_list_buf = GETBUF(SIZE(list_head)); readmem(free_area, KVADDR, free_area_buf2, SIZE(free_area), "free_area struct", FAULT_ON_ERROR); } } else error(FATAL, "neither free_area_struct or free_area structures exist\n"); ld = &list_data; if (!verbose) fprintf(fp, "%s", free_area_hdr4); total_free = 0; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); if (list_count > 1) goto multiple_lists; for (i = 0; i < num; i++, free_area += SIZE_OPTION(free_area_struct, free_area)) { if (verbose) fprintf(fp, "%s", free_area_hdr3); fprintf(fp, "%3d ", i); chunk_size = power(2, i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, " %7s ", buf); readmem(free_area, KVADDR, free_area_buf, sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area))); if (free_area_buf[0] == free_area) { if (verbose) fprintf(fp, "\n"); else fprintf(fp, "%6d %6d\n", 0, 0); continue; } if (verbose) fprintf(fp, "\n"); BZERO(ld, sizeof(struct list_data)); ld->flags = verbose | RETURN_ON_DUPLICATE; 
ld->start = free_area_buf[0]; ld->end = free_area; if (VALID_MEMBER(page_list_next)) ld->list_head_offset = OFFSET(page_list); else if (VALID_MEMBER(page_lru)) ld->list_head_offset = OFFSET(page_lru)+ OFFSET(list_head_next); else error(FATAL, "neither page.list or page.lru exist?\n"); cnt = do_list(ld); if (cnt < 0) { error(pc->curcmd_flags & IGNORE_ERRORS ? INFO : FATAL, "corrupted free list from free_area_struct: %lx\n", free_area); if (pc->curcmd_flags & IGNORE_ERRORS) break; } if (!verbose) fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); total_free += (cnt * chunk_size); } return total_free; multiple_lists: for (i = 0; i < num; i++, free_area += SIZE_OPTION(free_area_struct, free_area)) { readmem(free_area, KVADDR, free_area_buf2, SIZE(free_area), "free_area struct", FAULT_ON_ERROR); for (j = 0, free_list = free_area; j < list_count; j++, free_list += SIZE(list_head)) { if (verbose) fprintf(fp, "%s", free_area_hdr3); fprintf(fp, "%3d ", i); chunk_size = power(2, i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, " %7s ", buf); readmem(free_list, KVADDR, free_list_buf, SIZE(list_head), "free_area free_list", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_list))); free_ptr = (ulong *)free_list_buf; if (*free_ptr == free_list) { if (verbose) fprintf(fp, "\n"); else fprintf(fp, "%6d %6d\n", 0, 0); continue; } if (verbose) fprintf(fp, "\n"); BZERO(ld, sizeof(struct list_data)); ld->flags = verbose | RETURN_ON_DUPLICATE; ld->start = *free_ptr; ld->end = free_list; ld->list_head_offset = OFFSET(page_lru) + OFFSET(list_head_next); if (callback_data) { ld->flags &= ~VERBOSE; ld->flags |= (LIST_CALLBACK|CALLBACK_RETURN); ld->callback_func = free_page_callback; ld->callback_data = (void *)callback_data; callback_data->block_size = chunk_size * PAGESIZE(); } cnt = do_list(ld); if (cnt < 0) { error(pc->curcmd_flags & IGNORE_ERRORS ? 
INFO : FATAL, "corrupted free list %d from free_area struct: %lx\n", j, free_area); if (pc->curcmd_flags & IGNORE_ERRORS) goto bailout; } if (callback_data && callback_data->found) { fprintf(fp, "%lx\n", callback_data->page); goto bailout; } if (!verbose) fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); total_free += (cnt * chunk_size); } } bailout: FREEBUF(free_area_buf2); FREEBUF(free_list_buf); return total_free; } /* * dump_kmeminfo displays basic memory use information typically shown * by /proc/meminfo, and then some... */ char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n"; static void dump_kmeminfo(void) { int i, len; ulong totalram_pages; ulong freeram_pages; ulong used_pages; ulong shared_pages; ulong buffer_pages; ulong subtract_buffer_pages; ulong totalswap_pages, totalused_pages; ulong totalhigh_pages; ulong freehighmem_pages; ulong totallowmem_pages; ulong freelowmem_pages; ulong allowed; long committed; ulong overcommit_kbytes = 0; int overcommit_ratio; ulong hugetlb_total_pages; long nr_file_pages, nr_slab; ulong swapper_space_nrpages; ulong pct; ulong value1, value2; uint tmp; struct meminfo meminfo; struct gnu_request req; long page_cache_size; ulong get_totalram; ulong get_buffers; ulong get_slabs; struct syment *sp_array[2]; char buf[BUFSIZE]; BZERO(&meminfo, sizeof(struct meminfo)); meminfo.flags = GET_ALL; dump_mem_map(&meminfo); get_totalram = meminfo.get_totalram; shared_pages = meminfo.get_shared; get_buffers = meminfo.get_buffers; get_slabs = meminfo.get_slabs; /* * If vm_stat array exists, override page search info. 
*/ if (vm_stat_init()) { if (dump_vm_stat("NR_SLAB", &nr_slab, 0)) get_slabs = nr_slab; else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) { get_slabs = nr_slab; if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0)) get_slabs += nr_slab; } } fprintf(fp, "%s", kmeminfo_hdr); /* * Get total RAM based upon how the various versions of si_meminfo() * have done it, latest to earliest: * * Prior to 2.3.36, count all mem_map pages minus the reserved ones. * From 2.3.36 onwards, use "totalram_pages" if set. */ if (symbol_exists("totalram_pages")) { totalram_pages = vt->totalram_pages ? vt->totalram_pages : get_totalram; } else totalram_pages = get_totalram; fprintf(fp, "%13s %7ld %11s ----\n", "TOTAL MEM", totalram_pages, pages_to_size(totalram_pages, buf)); /* * Get free pages from dump_free_pages() or its associates. * Used pages are a free-bee... */ meminfo.flags = GET_FREE_PAGES; vt->dump_free_pages(&meminfo); freeram_pages = meminfo.retval; pct = (freeram_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct); used_pages = totalram_pages - freeram_pages; pct = (used_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "USED", used_pages, pages_to_size(used_pages, buf), pct); /* * Get shared pages from dump_mem_map(). Note that this is done * differently than the kernel -- it just tallies the non-reserved * pages that have a count of greater than 1. 
*/ pct = (shared_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "SHARED", shared_pages, pages_to_size(shared_pages, buf), pct); subtract_buffer_pages = 0; if (symbol_exists("buffermem_pages")) { get_symbol_data("buffermem_pages", sizeof(int), &tmp); buffer_pages = (ulong)tmp; } else if (symbol_exists("buffermem")) { get_symbol_data("buffermem", sizeof(int), &tmp); buffer_pages = BTOP(tmp); } else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("nr_blockdev_pages")) { subtract_buffer_pages = buffer_pages = nr_blockdev_pages(); } else buffer_pages = 0; pct = (buffer_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct); if (CRASHDEBUG(1)) error(NOTE, "pages with buffers: %ld\n", get_buffers); /* * page_cache_size has evolved from a long to an atomic_t to * not existing at all. */ if (symbol_exists("page_cache_size")) { get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &tmp); page_cache_size = (long)tmp; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); page_cache_size -= subtract_buffer_pages; } else if (symbol_exists("nr_pagecache")) { get_symbol_data("nr_pagecache", sizeof(int), &tmp); page_cache_size = (long)tmp; page_cache_size -= subtract_buffer_pages; } else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) { char *swapper_space = GETBUF(SIZE(address_space)); swapper_space_nrpages = 0; if (symbol_exists("swapper_spaces") && (len = get_array_length("swapper_spaces", NULL, 0))) { for (i = 0; i < len; i++) { if (!readmem(symbol_value("swapper_spaces") + i * SIZE(address_space), KVADDR, swapper_space, SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) break; swapper_space_nrpages += ULONG(swapper_space + OFFSET(address_space_nrpages)); } } else if (symbol_exists("swapper_space") && readmem(symbol_value("swapper_space"), 
KVADDR, swapper_space, SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) swapper_space_nrpages = ULONG(swapper_space + OFFSET(address_space_nrpages)); page_cache_size = nr_file_pages - swapper_space_nrpages - buffer_pages; FREEBUF(swapper_space); } else page_cache_size = 0; pct = (page_cache_size * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "CACHED", page_cache_size, pages_to_size(page_cache_size, buf), pct); /* * Although /proc/meminfo doesn't show it, show how much memory * the slabs take up. */ pct = (get_slabs * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "SLAB", get_slabs, pages_to_size(get_slabs, buf), pct); if (symbol_exists("totalhigh_pages")) { switch (get_syment_array("totalhigh_pages", sp_array, 2)) { case 1: get_symbol_data("totalhigh_pages", sizeof(ulong), &totalhigh_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalhigh_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalhigh_pages #2", RETURN_ON_ERROR))) break; totalhigh_pages = MAX(value1, value2); break; } pct = totalhigh_pages ? (totalhigh_pages * 100)/totalram_pages : 0; fprintf(fp, "\n%13s %7ld %11s %3ld%% of TOTAL MEM\n", "TOTAL HIGH", totalhigh_pages, pages_to_size(totalhigh_pages, buf), pct); meminfo.flags = GET_FREE_HIGHMEM_PAGES; vt->dump_free_pages(&meminfo); freehighmem_pages = meminfo.retval; pct = freehighmem_pages ? 
(freehighmem_pages * 100)/totalhigh_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HIGH\n", "FREE HIGH", freehighmem_pages, pages_to_size(freehighmem_pages, buf), pct); totallowmem_pages = totalram_pages - totalhigh_pages; pct = (totallowmem_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "TOTAL LOW", totallowmem_pages, pages_to_size(totallowmem_pages, buf), pct); freelowmem_pages = freeram_pages - freehighmem_pages; pct = (freelowmem_pages * 100)/totallowmem_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LOW\n", "FREE LOW", freelowmem_pages, pages_to_size(freelowmem_pages, buf), pct); } /* * get swap data from dump_swap_info(). */ fprintf(fp, "\n"); if (symbol_exists("swapper_space") || symbol_exists("swapper_spaces")) { if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, &totalused_pages)) { fprintf(fp, "%13s %7ld %11s ----\n", "TOTAL SWAP", totalswap_pages, pages_to_size(totalswap_pages, buf)); pct = totalswap_pages ? (totalused_pages * 100) / totalswap_pages : 100; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n", "SWAP USED", totalused_pages, pages_to_size(totalused_pages, buf), pct); pct = totalswap_pages ? 
((totalswap_pages - totalused_pages) * 100) / totalswap_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n", "SWAP FREE", totalswap_pages - totalused_pages, pages_to_size(totalswap_pages - totalused_pages, buf), pct); } else error(INFO, "swap_info[%ld].swap_map at %lx is inaccessible\n", totalused_pages, totalswap_pages); } /* * Show committed memory */ if (kernel_symbol_exists("sysctl_overcommit_memory")) { fprintf(fp, "\n"); if (kernel_symbol_exists("sysctl_overcommit_kbytes")) get_symbol_data("sysctl_overcommit_kbytes", sizeof(ulong), &overcommit_kbytes); if (overcommit_kbytes) allowed = overcommit_kbytes >> (machdep->pageshift - 10); else { get_symbol_data("sysctl_overcommit_ratio", sizeof(int), &overcommit_ratio); if (!get_hugetlb_total_pages(&hugetlb_total_pages)) goto bailout; allowed = ((totalram_pages - hugetlb_total_pages) * overcommit_ratio / 100); } if (symbol_exists("vm_committed_as")) { if (INVALID_MEMBER(percpu_counter_count)) goto bailout; readmem(symbol_value("vm_committed_as") + OFFSET(percpu_counter_count), KVADDR, &committed, sizeof(long), "percpu_counter count", FAULT_ON_ERROR); /* Ensure always positive */ if (committed < 0) committed = 0; } else { if (INVALID_MEMBER(atomic_t_counter)) goto bailout; readmem(symbol_value("vm_committed_space") + OFFSET(atomic_t_counter), KVADDR, &committed, sizeof(int), "atomic_t counter", FAULT_ON_ERROR); } allowed += totalswap_pages; fprintf(fp, "%13s %7ld %11s ----\n", "COMMIT LIMIT", allowed, pages_to_size(allowed, buf)); if (allowed) { pct = committed ? ((committed * 100) / allowed) : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LIMIT\n", "COMMITTED", committed, pages_to_size(committed, buf), pct); } else fprintf(fp, "%13s %7ld %11s ----\n", "COMMITTED", committed, pages_to_size(committed, buf)); } bailout: dump_zone_page_usage(); } /* * Emulate 2.6 nr_blockdev_pages() function. 
*/ static ulong nr_blockdev_pages(void) { struct list_data list_data, *ld; int i, bdevcnt; ulong inode, address_space; ulong nrpages; char *block_device_buf, *inode_buf, *address_space_buf; ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data("all_bdevs", sizeof(void *), &ld->start); if (empty_list(ld->start)) return 0; ld->flags |= LIST_ALLOCATE; ld->end = symbol_value("all_bdevs"); ld->list_head_offset = OFFSET(block_device_bd_list); block_device_buf = GETBUF(SIZE(block_device)); inode_buf = GETBUF(SIZE(inode)); address_space_buf = GETBUF(SIZE(address_space)); bdevcnt = do_list(ld); /* * go through the block_device list, emulating: * * ret += bdev->bd_inode->i_mapping->nrpages; */ for (i = nrpages = 0; i < bdevcnt; i++) { readmem(ld->list_ptr[i], KVADDR, block_device_buf, SIZE(block_device), "block_device buffer", FAULT_ON_ERROR); inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode)); readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); readmem(address_space, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages += ULONG(address_space_buf + OFFSET(address_space_nrpages)); } FREEBUF(ld->list_ptr); FREEBUF(block_device_buf); FREEBUF(inode_buf); FREEBUF(address_space_buf); return nrpages; } /* * dump_vmlist() displays information from the vmlist. 
*/

/*
 * Walk the pre-3.10 "vmlist" chain of vm_struct entries and either
 * display them or service one of the GET_* / VMLIST_* sub-queries
 * encoded in vi->flags:
 *
 *   GET_VMLIST_COUNT    - vi->retval = number of entries
 *   GET_VMLIST          - fill vi->vmlist[] (sized by a prior count)
 *   GET_HIGHEST         - vi->retval = highest mapped vmalloc address
 *   GET_PHYS_TO_VMALLOC - vi->retval = vmalloc address of vi->spec_addr
 *   VMLIST_VERIFY       - vi->retval = nonzero if vi->spec_addr is found
 *
 * On kernels that use vmap_area rbtree/list bookkeeping, defers to
 * dump_vmap_area().
 */
static void
dump_vmlist(struct meminfo *vi)
{
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	ulong vmlist;
	ulong addr, size, next, pcheck, count, verified;
	physaddr_t paddr;
	int mod_vmlist;

	if (vt->flags & USE_VMAP_AREA) {
		dump_vmap_area(vi);
		return;
	}

	get_symbol_data("vmlist", sizeof(void *), &vmlist);
	next = vmlist;
	count = verified = 0;
	/* very old kernels kept module mappings on a separate list */
	mod_vmlist = kernel_symbol_exists("mod_vmlist");

	while (next) {
		/* Header only for the plain display mode, printed once. */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN),
			    CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "),
			    CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(next+OFFSET(vm_struct_addr), KVADDR,
		    &addr, sizeof(void *),
		    "vmlist addr", FAULT_ON_ERROR);
		readmem(next+OFFSET(vm_struct_size), KVADDR,
		    &size, sizeof(ulong),
		    "vmlist size", FAULT_ON_ERROR);

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
			 */
			if (vi->flags & GET_VMLIST) {
				if (count < vi->retval) {
					vi->vmlist[count].addr = addr;
					vi->vmlist[count].size = size;
				}
			}
			count++;
			goto next_entry;
		}

		/* Display (or verify) this entry if no address filter, or
		 * if the specified kernel virtual address lies within it. */
		if (!(vi->flags & ADDRESS_SPECIFIED) ||
		    ((vi->memtype == KVADDR) &&
		    ((vi->spec_addr >= addr) &&
		    (vi->spec_addr < (addr+size))))) {
			if (vi->flags & VMLIST_VERIFY) {
				verified++;
				break;
			}
			fprintf(fp, "%s%s %s - %s %6ld\n",
			    mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
			    MKSTR(next)), space(MINSPACE-1),
			    mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST,
			    MKSTR(addr)),
			    mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST,
			    MKSTR(addr+size)),
			    size);
		}

		/* Physical-address search: probe each page of the range. */
		if ((vi->flags & ADDRESS_SPECIFIED) &&
		    (vi->memtype == PHYSADDR)) {
			for (pcheck = addr; pcheck < (addr+size);
			     pcheck += PAGESIZE()) {
				if (!kvtop(NULL, pcheck, &paddr, 0))
					continue;
				if ((vi->spec_addr >= paddr) &&
				    (vi->spec_addr < (paddr+PAGESIZE()))) {
					if (vi->flags & GET_PHYS_TO_VMALLOC) {
						vi->retval = pcheck +
						    PAGEOFFSET(paddr);
						return;
					} else
						fprintf(fp,
						    "%s%s %s - %s %6ld\n",
						    mkstring(buf, VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(next)),
						    space(MINSPACE-1),
						    mkstring(buf1, VADDR_PRLEN,
						    LONG_HEX|RJUST,
						    MKSTR(addr)),
						    mkstring(buf2, VADDR_PRLEN,
						    LONG_HEX|LJUST,
						    MKSTR(addr+size)),
						    size);
					break;
				}
			}
		}

next_entry:
		readmem(next+OFFSET(vm_struct_next),
		    KVADDR, &next, sizeof(void *),
		    "vmlist next", FAULT_ON_ERROR);

		/* Chain the module list after the main list, once. */
		if (!next && mod_vmlist) {
			get_symbol_data("mod_vmlist", sizeof(void *), &next);
			mod_vmlist = FALSE;
		}
	}

	/* NOTE(review): if the vmlist is empty, GET_HIGHEST reads addr/size
	 * uninitialized here — presumably vmlist is never empty on kernels
	 * this path serves; confirm before relying on it. */
	if (vi->flags & GET_HIGHEST)
		vi->retval = addr+size;

	if (vi->flags & GET_VMLIST_COUNT)
		vi->retval = count;

	if (vi->flags & VMLIST_VERIFY)
		vi->retval = verified;
}

/*
 * vmap_area-based equivalent of dump_vmlist() for kernels flagged
 * USE_VMAP_AREA: walks the "vmap_area_list" and reports only entries
 * whose flags equal VM_VM_AREA (i.e. backed by a vm_struct).
 * Services the same GET_* / VMLIST_* sub-queries via vi->flags.
 */
static void
dump_vmap_area(struct meminfo *vi)
{
	int i, cnt;
	ulong start, end, vm_struct, flags;
	struct list_data list_data, *ld;
	char *vmap_area_buf;
	ulong size, pcheck, count, verified;
	physaddr_t paddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

#define VM_VM_AREA 0x4   /* mm/vmalloc.c */

	/* NOTE(review): vmap_area_buf is never FREEBUF'd here — presumably
	 * relying on command-completion buffer reclamation; confirm. */
	vmap_area_buf = GETBUF(SIZE(vmap_area));
	start = count = verified = size = 0;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	ld->flags = LIST_HEAD_FORMAT|LIST_HEAD_POINTER|LIST_ALLOCATE;
	get_symbol_data("vmap_area_list", sizeof(void *), &ld->start);
	ld->list_head_offset = OFFSET(vmap_area_list);
	ld->end = symbol_value("vmap_area_list");

	cnt = do_list(ld);

	for (i = 0; i < cnt; i++) {
		/* Header only for the plain display mode, printed once. */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (i == 0) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VMAP_AREA"),
			    VADDR_PRLEN), CENTER|LJUST, "VMAP_AREA"));
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VM_STRUCT"),
			    VADDR_PRLEN), CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf1, (VADDR_PRLEN * 2) + strlen(" - "),
			    CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(ld->list_ptr[i], KVADDR, vmap_area_buf,
		    SIZE(vmap_area), "vmap_area struct", FAULT_ON_ERROR);

		flags = ULONG(vmap_area_buf + OFFSET(vmap_area_flags));
		/* skip lazy-free/unmapped areas: only vm_struct-backed ones */
		if (flags != VM_VM_AREA)
			continue;
		start = ULONG(vmap_area_buf + OFFSET(vmap_area_va_start));
		end = ULONG(vmap_area_buf + OFFSET(vmap_area_va_end));
		vm_struct = ULONG(vmap_area_buf + OFFSET(vmap_area_vm));
		size = end - start;

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
			 */
			if (vi->flags & GET_VMLIST) {
				if (count < vi->retval) {
					vi->vmlist[count].addr = start;
					vi->vmlist[count].size = size;
				}
			}
			count++;
			continue;
		}

		/* Display (or verify) when unfiltered, or when the given
		 * kernel virtual address falls within this area. */
		if (!(vi->flags & ADDRESS_SPECIFIED) ||
		    ((vi->memtype == KVADDR) &&
		    ((vi->spec_addr >= start) &&
		    (vi->spec_addr < (start+size))))) {
			if (vi->flags & VMLIST_VERIFY) {
				verified++;
				break;
			}
			fprintf(fp, "%s%s %s%s %s - %s %7ld\n",
			    mkstring(buf1,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
			    MKSTR(ld->list_ptr[i])), space(MINSPACE-1),
			    mkstring(buf2,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
			    MKSTR(vm_struct)), space(MINSPACE-1),
			    mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST,
			    MKSTR(start)),
			    mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST,
			    MKSTR(start+size)),
			    size);
		}

		/* Physical-address search: probe each page of the range. */
		if ((vi->flags & ADDRESS_SPECIFIED) &&
		    (vi->memtype == PHYSADDR)) {
			for (pcheck = start; pcheck < (start+size);
			     pcheck += PAGESIZE()) {
				if (!kvtop(NULL, pcheck, &paddr, 0))
					continue;
				if ((vi->spec_addr >= paddr) &&
				    (vi->spec_addr < (paddr+PAGESIZE()))) {
					if (vi->flags & GET_PHYS_TO_VMALLOC) {
						vi->retval = pcheck +
						    PAGEOFFSET(paddr);
						FREEBUF(ld->list_ptr);
						return;
					} else
						fprintf(fp,
						    "%s%s %s%s %s - %s %7ld\n",
						    mkstring(buf1,VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(ld->list_ptr[i])),
						    space(MINSPACE-1),
						    mkstring(buf2, VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(vm_struct)),
						    space(MINSPACE-1),
						    mkstring(buf3, VADDR_PRLEN,
						    LONG_HEX|RJUST,
						    MKSTR(start)),
						    mkstring(buf4, VADDR_PRLEN,
						    LONG_HEX|LJUST,
						    MKSTR(start+size)),
						    size);
					break;
				}
			}
		}
	}

	FREEBUF(ld->list_ptr);

	/* start/size retain the values from the last VM_VM_AREA entry */
	if (vi->flags & GET_HIGHEST)
		vi->retval = start+size;

	if (vi->flags & GET_VMLIST_COUNT)
		vi->retval = count;

	if (vi->flags & VMLIST_VERIFY)
		vi->retval = verified;
}

/*
 * dump_page_lists() displays information from the active_list,
 * inactive_dirty_list and inactive_clean_list from each zone.
*/ static int dump_page_lists(struct meminfo *mi) { int i, c, n, retval; ulong node_zones, pgdat; struct node_table *nt; struct list_data list_data, *ld; char buf[BUFSIZE]; ulong value; ulong inactive_clean_pages, inactive_clean_list; int nr_active_pages, nr_inactive_pages; int nr_inactive_dirty_pages; ld = &list_data; retval = FALSE; nr_active_pages = nr_inactive_dirty_pages = -1; BZERO(ld, sizeof(struct list_data)); ld->list_head_offset = OFFSET(page_lru); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; else if (mi->flags & VERBOSE) ld->flags |= VERBOSE; if (mi->flags & GET_ACTIVE_LIST) { if (!symbol_exists("active_list")) error(FATAL, "active_list does not exist in this kernel\n"); if (symbol_exists("nr_active_pages")) get_symbol_data("nr_active_pages", sizeof(int), &nr_active_pages); else error(FATAL, "nr_active_pages does not exist in this kernel\n"); ld->end = symbol_value("active_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "active_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_active_pages: %d ", mi->flags & VERBOSE ? 
"\n" : "", nr_active_pages); if (c != nr_active_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_LIST) { if (!symbol_exists("inactive_list")) error(FATAL, "inactive_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_pages")) get_symbol_data("nr_inactive_pages", sizeof(int), &nr_inactive_pages); else error(FATAL, "nr_active_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "inactive_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_pages: %d ", mi->flags & VERBOSE ? "\n" : "", nr_inactive_pages); if (c != nr_inactive_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_DIRTY) { if (!symbol_exists("inactive_dirty_list")) error(FATAL, "inactive_dirty_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_dirty_pages")) get_symbol_data("nr_inactive_dirty_pages", sizeof(int), &nr_inactive_dirty_pages); else error(FATAL, "nr_inactive_dirty_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_dirty_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "%sinactive_dirty_list:\n", mi->flags & GET_ACTIVE_LIST ? 
"\n" : ""); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_dirty_pages: %d ", mi->flags & VERBOSE ? "\n" : "", nr_inactive_dirty_pages); if (c != nr_inactive_dirty_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_CLEAN) { if (INVALID_MEMBER(zone_struct_inactive_clean_list)) error(FATAL, "inactive_clean_list(s) do not exist in this kernel\n"); get_symbol_data("pgdat_list", sizeof(void *), &pgdat); if ((mi->flags & VERBOSE) && (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) fprintf(fp, "\n"); for (n = 0; pgdat; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "zone_struct name", FAULT_ON_ERROR); if (!read_string(value, buf, BUFSIZE-1)) sprintf(buf, "(unknown) "); if (mi->flags & VERBOSE) { if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" inactive_clean_list:\n", buf); } readmem(node_zones + OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones + OFFSET(zone_struct_inactive_clean_list), KVADDR, &inactive_clean_list, sizeof(ulong), "inactive_clean_list", FAULT_ON_ERROR); ld->start = inactive_clean_list; ld->end = node_zones + OFFSET(zone_struct_inactive_clean_list); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else 
{ if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" ", buf); fprintf(fp, "inactive_clean_pages: %ld ", inactive_clean_pages); if (c != inactive_clean_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } node_zones += SIZE(zone_struct); } readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } } return retval; } /* * Check whether an address is a kmem_cache_t address, and if so, return * a pointer to the static buffer containing its name string. Otherwise * return NULL on failure. */ #define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" static char * is_kmem_cache_addr(ulong vaddr, char *kbuf) { ulong cache, cache_cache, name; long next_offset, name_offset; if (vt->flags & KMEM_CACHE_UNAVAIL) { error(INFO, "kmem cache slab subsystem not available\n"); return NULL; } if (vt->flags & KMALLOC_SLUB) return is_kmem_cache_addr_common(vaddr, kbuf); if ((vt->flags & KMALLOC_COMMON) && !symbol_exists("cache_cache")) return is_kmem_cache_addr_common(vaddr, kbuf); name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); cache = cache_cache = symbol_value("cache_cache"); do { if (cache == vaddr) { if (vt->kmem_cache_namelen) { readmem(cache+name_offset, KVADDR, kbuf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(cache+name_offset, KVADDR, &name, sizeof(name), "name", FAULT_ON_ERROR); if (!read_string(name, kbuf, BUFSIZE-1)) { if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); else error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(kbuf, "(unknown)"); } } return kbuf; } readmem(cache+next_offset, KVADDR, &cache, sizeof(long), "kmem_cache_s next", FAULT_ON_ERROR); if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) cache -= next_offset; } while (cache != cache_cache); return NULL; } /* * Note same functionality as above, but instead it just * dumps all slab cache names and their addresses. */ static void kmem_cache_list(void) { ulong cache, cache_cache, name; long next_offset, name_offset; char *cache_buf; int has_cache_chain; ulong cache_chain; char buf[BUFSIZE]; if (vt->flags & KMEM_CACHE_UNAVAIL) { error(INFO, "kmem cache slab subsystem not available\n"); return; } if (vt->flags & (KMALLOC_SLUB|KMALLOC_COMMON)) { kmem_cache_list_common(); return; } if (symbol_exists("cache_chain")) { has_cache_chain = TRUE; cache_chain = symbol_value("cache_chain"); } else { has_cache_chain = FALSE; cache_chain = 0; } name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); cache = cache_cache = symbol_value("cache_cache"); cache_buf = GETBUF(SIZE(kmem_cache_s)); do { readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(cache_buf+name_offset, buf, vt->kmem_cache_namelen); } else { name = ULONG(cache_buf + name_offset); if (!read_string(name, buf, BUFSIZE-1)) { if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); else error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } fprintf(fp, "%lx %s\n", cache, buf); cache = ULONG(cache_buf + next_offset); if (has_cache_chain && (cache == cache_chain)) readmem(cache, KVADDR, &cache, sizeof(char *), "cache_chain", FAULT_ON_ERROR); if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) cache -= next_offset; } while (cache != cache_cache); FREEBUF(cache_buf); } /* * Translate an address to its physical page number, verify that the * page in fact belongs to the slab subsystem, and if so, return the * name of the cache to which it belongs. 
*/

/*
 * Translate vaddr to its page struct, confirm the page belongs to the
 * slab subsystem (via the PG_slab flag when known, falling back to the
 * compound head page), then extract the owning kmem_cache pointer by
 * whichever struct page member this kernel version uses, and return
 * the cache's name via is_kmem_cache_addr().  Returns NULL (warning
 * printed only if verbose) when the translation or slab check fails.
 */
static char *
vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose)
{
	physaddr_t paddr;
	ulong page, cache, page_flags;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		if (verbose)
			error(WARNING,
			    "cannot make virtual-to-physical translation: %lx\n",
			    vaddr);
		return NULL;
	}

	if (!phys_to_page(paddr, &page)) {
		if (verbose)
			error(WARNING,
			    "cannot find mem_map page for address: %lx\n",
			    vaddr);
		return NULL;
	}

	if (vt->PG_slab) {
		readmem(page+OFFSET(page_flags), KVADDR,
		    &page_flags, sizeof(ulong), "page.flags",
		    FAULT_ON_ERROR);

		if (!(page_flags & (1 << vt->PG_slab))) {
			/* for compound pages, PG_slab may only be set on
			 * the head page -- retry there when supported */
			if (((vt->flags & KMALLOC_SLUB) ||
			    VALID_MEMBER(page_compound_head)) ||
			    ((vt->flags & KMALLOC_COMMON) &&
			    VALID_MEMBER(page_slab) &&
			    VALID_MEMBER(page_first_page))) {
				readmem(compound_head(page)+OFFSET(page_flags),
				    KVADDR, &page_flags, sizeof(ulong),
				    "page.flags", FAULT_ON_ERROR);
				if (!(page_flags & (1 << vt->PG_slab)))
					return NULL;
			} else
				return NULL;
		}
	}

	/* fetch the cache pointer from the member this kernel provides */
	if ((vt->flags & KMALLOC_SLUB) ||
	    ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab) &&
	    (VALID_MEMBER(page_compound_head) ||
	    VALID_MEMBER(page_first_page)))) {
		readmem(compound_head(page)+OFFSET(page_slab),
		    KVADDR, &cache, sizeof(void *),
		    "page.slab", FAULT_ON_ERROR);
	} else if (VALID_MEMBER(page_next))
		readmem(page+OFFSET(page_next),
		    KVADDR, &cache, sizeof(void *),
		    "page.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_next))
		readmem(page+OFFSET(page_list_next),
		    KVADDR, &cache, sizeof(void *),
		    "page.list.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_next),
		    KVADDR, &cache, sizeof(void *),
		    "page.lru.next", FAULT_ON_ERROR);
	else
		error(FATAL,
		    "cannot determine slab cache from page struct\n");

	return(is_kmem_cache_addr(cache, buf));
}

/*
 * For SLAB_OVERLOAD_PAGE kernels: if vaddr is itself a page struct
 * whose (head) page.slab points at a valid kmem_cache, return the
 * cache name in buf and set *page_head to the compound head page.
 * Returns NULL when the preconditions are not met.
 */
static char *
is_slab_overload_page(ulong vaddr, ulong *page_head, char *buf)
{
	ulong cache;
	char *p;

	if ((vt->flags & SLAB_OVERLOAD_PAGE) &&
	    is_page_ptr(vaddr, NULL) &&
	    VALID_MEMBER(page_slab) &&
	    (VALID_MEMBER(page_compound_head) ||
	    VALID_MEMBER(page_first_page))) {
		readmem(compound_head(vaddr)+OFFSET(page_slab),
		    KVADDR, &cache, sizeof(void *),
		    "page.slab", FAULT_ON_ERROR);
		p = is_kmem_cache_addr(cache, buf);
		if (p)
			*page_head = compound_head(vaddr);
		return p;
	}

	return NULL;
}

/*
 * Translate an address to its physical page number, verify that the
 * page in fact belongs to the slab subsystem, and if so, return the
 * address of the slab to which it belongs.
 */
static ulong
vaddr_to_slab(ulong vaddr)
{
	physaddr_t paddr;
	ulong page;
	ulong slab;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		error(WARNING,
		    "cannot make virtual-to-physical translation: %lx\n",
		    vaddr);
		return 0;
	}

	if (!phys_to_page(paddr, &page)) {
		error(WARNING,
		    "cannot find mem_map page for address: %lx\n", vaddr);
		return 0;
	}

	slab = 0;

	/* the slab pointer lives in different places per kernel version */
	if ((vt->flags & KMALLOC_SLUB) || VALID_MEMBER(page_compound_head))
		slab = compound_head(page);
	else if (vt->flags & SLAB_OVERLOAD_PAGE)
		slab = compound_head(page);
	else if ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab_page))
		readmem(page+OFFSET(page_slab_page),
		    KVADDR, &slab, sizeof(void *),
		    "page.slab_page", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_prev))
		readmem(page+OFFSET(page_prev),
		    KVADDR, &slab, sizeof(void *),
		    "page.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_prev))
		readmem(page+OFFSET(page_list_prev),
		    KVADDR, &slab, sizeof(void *),
		    "page.list.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev),
		    KVADDR, &slab, sizeof(void *),
		    "page.lru.prev", FAULT_ON_ERROR);
	else
		error(FATAL, "unknown definition of struct page?\n");

	return slab;
}

/*
 * Initialize any data required for scouring the kmalloc subsystem more
 * efficiently.
*/ char slab_hdr[100] = { 0 }; char kmem_cache_hdr[100] = { 0 }; char free_inuse_hdr[100] = { 0 }; static void kmem_cache_init(void) { ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; long cache_count, num_offset, next_offset; char *cache_buf; if (vt->flags & KMEM_CACHE_UNAVAIL) return; if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) return; if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) return; please_wait("gathering kmem slab cache data"); if (!strlen(slab_hdr)) { if (vt->flags & KMALLOC_SLUB) sprintf(slab_hdr, "SLAB%sMEMORY%sNODE TOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); else sprintf(slab_hdr, "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); } if (!strlen(kmem_cache_hdr)) sprintf(kmem_cache_hdr, "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", space(VADDR_PRLEN > 8 ? 12 : 4)); if (!strlen(free_inuse_hdr)) sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); if (vt->flags & KMALLOC_SLUB) { kmem_cache_init_slub(); please_wait_done(); return; } num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); max_cnum = max_limit = max_cpus = cache_count = tmp2 = 0; /* * Pre-2.6 versions used the "cache_cache" as the head of the * slab chain list. 2.6 uses the "cache_chain" list_head. * In 3.6 SLAB and SLUB use the "slab_caches" list_head. 
*/ if (vt->flags & PERCPU_KMALLOC_V2) { if (kernel_symbol_exists("cache_chain")) { get_symbol_data("cache_chain", sizeof(ulong), &cache); cache_end = symbol_value("cache_chain"); } else if (kernel_symbol_exists("slab_caches")) { vt->flags |= KMALLOC_COMMON; get_symbol_data("slab_caches", sizeof(ulong), &cache); cache_end = symbol_value("slab_caches"); } else { error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); return; } cache -= next_offset; } else cache = cache_end = symbol_value("cache_cache"); if (!(pc->flags & RUNTIME)) { if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); } cache_buf = GETBUF(SIZE(kmem_cache_s)); hq_open(); do { cache_count++; if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); vt->flags |= KMEM_CACHE_UNAVAIL; error(INFO, "%sunable to initialize kmem slab cache subsystem\n\n", DUMPFILE() ? "\n" : ""); hq_close(); return; } if (!hq_enter(cache)) { error(WARNING, "%sduplicate kmem_cache entry in cache list: %lx\n", DUMPFILE() ? "\n" : "", cache); error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); vt->flags |= KMEM_CACHE_UNAVAIL; hq_close(); return; } tmp = (ulong)(UINT(cache_buf + num_offset)); if (tmp > max_cnum) max_cnum = tmp; if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) max_limit = tmp; /* * Recognize and bail out on any max_cpudata_limit() failures. 
*/ if (vt->flags & KMEM_CACHE_UNAVAIL) { FREEBUF(cache_buf); hq_close(); return; } if (tmp2 > max_cpus) max_cpus = tmp2; cache = ULONG(cache_buf + next_offset); switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { case PERCPU_KMALLOC_V1: cache -= next_offset; break; case PERCPU_KMALLOC_V2: if (cache != cache_end) cache -= next_offset; break; } } while (cache != cache_end); hq_close(); FREEBUF(cache_buf); vt->kmem_max_c_num = max_cnum; vt->kmem_max_limit = max_limit; vt->kmem_max_cpus = max_cpus; vt->kmem_cache_count = cache_count; if (CRASHDEBUG(2)) { fprintf(fp, "kmem_cache_init:\n"); fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); } if (!(vt->flags & KMEM_CACHE_INIT)) { if (vt->flags & PERCPU_KMALLOC_V1) ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_name, "kmem_cache_s.name", NULL, sizeof(char)); else if (vt->flags & PERCPU_KMALLOC_V2) vt->kmem_cache_namelen = 0; else ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_c_name, "kmem_cache_s.c_name", NULL, 0); } please_wait_done(); vt->flags |= KMEM_CACHE_INIT; } static ulong kmem_cache_nodelists(ulong cache) { ulong nodelists = 0; if (vt->flags & NODELISTS_IS_PTR) { /* * nodelists is pointer to the array */ if (!readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &nodelists, sizeof(ulong), "nodelists pointer", RETURN_ON_ERROR)) error(WARNING, "cannot read kmem_cache nodelists pointer"); return nodelists; } else return cache+OFFSET(kmem_cache_s_lists); } static int kmem_cache_downsize(void) { char *cache_buf; ulong kmem_cache; uint buffer_size, object_size; int nr_node_ids; int nr_cpu_ids; if (vt->flags & KMALLOC_SLUB) { if (kernel_symbol_exists("kmem_cache") && VALID_MEMBER(kmem_cache_objsize) && try_get_symbol_data("kmem_cache", sizeof(ulong), &kmem_cache) && readmem(kmem_cache + OFFSET(kmem_cache_objsize), 
KVADDR, &object_size, sizeof(int), "kmem_cache objsize/object_size", RETURN_ON_ERROR)) { ASSIGN_SIZE(kmem_cache) = object_size; if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache)); } if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache)) return TRUE; else return FALSE; } if ((THIS_KERNEL_VERSION < LINUX(2,6,22)) || !(vt->flags & PERCPU_KMALLOC_V2_NODES) || (!kernel_symbol_exists("cache_cache") && !kernel_symbol_exists("kmem_cache_boot")) || (!MEMBER_EXISTS("kmem_cache", "buffer_size") && !MEMBER_EXISTS("kmem_cache", "size"))) { return FALSE; } if (vt->flags & NODELISTS_IS_PTR) { /* * More recent kernels have kmem_cache.array[] sized * by the number of cpus plus the number of nodes. */ if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else if (kernel_symbol_exists("cache_cache") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("cache_cache") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "cache_cache object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else object_size = 0; /* * Older kernels have kmem_cache.array[] sized by * the number of cpus; real value is nr_cpu_ids, * but fallback is kt->cpus. 
*/ if (kernel_symbol_exists("nr_cpu_ids")) get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids); else nr_cpu_ids = kt->cpus; ARRAY_LENGTH(kmem_cache_s_array) = nr_cpu_ids; if (!object_size) ASSIGN_SIZE(kmem_cache_s) = OFFSET(kmem_cache_s_array) + sizeof(ulong) * nr_cpu_ids; if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } else if (vt->flags & SLAB_CPU_CACHE) { if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else { object_size = OFFSET(kmem_cache_node) + (sizeof(void *) * vt->kmem_cache_len_nodes); ASSIGN_SIZE(kmem_cache_s) = object_size; } if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } cache_buf = GETBUF(SIZE(kmem_cache_s)); if (!readmem(symbol_value("cache_cache"), KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); return FALSE; } buffer_size = UINT(cache_buf + MEMBER_OFFSET("kmem_cache", "buffer_size")); if (buffer_size < SIZE(kmem_cache_s)) { if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; if (buffer_size >= (uint)(OFFSET(kmem_cache_s_lists) + (sizeof(void *) * vt->kmem_cache_len_nodes))) ASSIGN_SIZE(kmem_cache_s) = buffer_size; else error(WARNING, "questionable cache_cache.buffer_size: %d\n", buffer_size); if (CRASHDEBUG(1)) { fprintf(fp, "\nkmem_cache_downsize: %ld to %d\n", STRUCT_SIZE("kmem_cache"), buffer_size); 
fprintf(fp, "kmem_cache_downsize: nr_node_ids: %ld\n", vt->kmem_cache_len_nodes); } FREEBUF(cache_buf); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } FREEBUF(cache_buf); return FALSE; } /* * Stash a list of presumably-corrupted slab cache addresses. */ static void mark_bad_slab_cache(ulong cache) { size_t sz; if (vt->nr_bad_slab_caches) { sz = sizeof(ulong) * (vt->nr_bad_slab_caches + 1); if (!(vt->bad_slab_caches = realloc(vt->bad_slab_caches, sz))) { error(INFO, "cannot realloc bad_slab_caches array\n"); vt->nr_bad_slab_caches = 0; return; } } else { if (!(vt->bad_slab_caches = (ulong *)malloc(sizeof(ulong)))) { error(INFO, "cannot malloc bad_slab_caches array\n"); return; } } vt->bad_slab_caches[vt->nr_bad_slab_caches++] = cache; } static int bad_slab_cache(ulong cache) { int i; for (i = 0; i < vt->nr_bad_slab_caches; i++) { if (vt->bad_slab_caches[i] == cache) return TRUE; } return FALSE; } /* * Determine the largest cpudata limit for a given cache. 
*/

/*
 * Scan a cache's per-cpu data (cpucache_s for V1, array_cache for V2,
 * per-node/percpu layouts for V2_NODES kernels) and return the largest
 * "limit" value found, also checking any shared array_cache lists.
 * *cpus is set to the number of per-cpu entries examined.  On a read
 * failure the whole slab subsystem is flagged KMEM_CACHE_UNAVAIL and
 * 0 is returned (bail_out), except in the V2_NODES per-cpu loop where
 * the cache is merely recorded via mark_bad_slab_cache().
 */
static ulong
max_cpudata_limit(ulong cache, ulong *cpus)
{
	int i;
	ulong cpudata[NR_CPUS];
	int limit;
	ulong max_limit;
	ulong shared, percpu_ptr;
	ulong *start_address;

	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		goto kmem_cache_s_array_nodes;

	if (vt->flags & PERCPU_KMALLOC_V2)
		goto kmem_cache_s_array;

	if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
		/* no per-cpu data in this kernel's kmem_cache */
		*cpus = 0;
		return 0;
	}

	/* PERCPU_KMALLOC_V1: kmem_cache_s.cpudata[] of cpucache_s */
	if (!readmem(cache+OFFSET(kmem_cache_s_cpudata),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
	    "cpudata array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) &&
	     cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), KVADDR,
		    &limit, sizeof(int), "cpucache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array:

	/* PERCPU_KMALLOC_V2: kmem_cache_s.array[] of array_cache */
	if (!readmem(cache+OFFSET(kmem_cache_s_array),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
	    "array cache array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	     cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR,
		    &limit, sizeof(int), "array cache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	/*
	 * If the shared list can be accessed, check its size as well.
	 */
	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(cache+OFFSET(kmem_cache_s_lists)+
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) &&
	    readmem(shared+OFFSET(array_cache_limit),
	    KVADDR, &limit, sizeof(int), "shared array_cache limit",
	    RETURN_ON_ERROR|QUIET)) {
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array_nodes:

	if (CRASHDEBUG(3))
		fprintf(fp, "kmem_cache: %lx\n", cache);

	if (vt->flags & SLAB_CPU_CACHE) {
		/* cpu_cache is a percpu pointer: materialize per-cpu
		 * addresses via each cpu's __per_cpu_offset */
		if (!readmem(cache+OFFSET(kmem_cache_cpu_cache), KVADDR,
		    &percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache",
		    RETURN_ON_ERROR))
			goto bail_out;

		for (i = 0; i < kt->cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		if (!readmem(cache+OFFSET(kmem_cache_s_array),
		    KVADDR, &cpudata[0],
		    sizeof(ulong) * MIN(NR_CPUS,
		    ARRAY_LENGTH(kmem_cache_s_array)),
		    "array cache array", RETURN_ON_ERROR))
			goto bail_out;
	}

	for (i = max_limit = 0; i < kt->cpus; i++) {
		if (check_offline_cpu(i))
			continue;

		if (!cpudata[i])
			break;

		if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR,
		    &limit, sizeof(int), "array cache limit",
		    RETURN_ON_ERROR)) {
			/* tolerate one bad cache rather than disabling
			 * the whole subsystem */
			error(INFO,
			    "kmem_cache: %lx: invalid array_cache pointer: %lx\n",
			    cache, cpudata[i]);
			mark_bad_slab_cache(cache);
			return max_limit;
		}
		if (CRASHDEBUG(3))
			fprintf(fp, " array limit[%d]: %d\n", i, limit);
		/* negative limits indicate corrupt data: report, skip */
		if ((unsigned int)limit > INT_MAX)
			error(INFO,
			    "kmem_cache: %lx: invalid array limit[%d]: %d\n",
			    cache, i, limit);
		else if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;

	/*
	 * Check the shared list of all the nodes.
	 */
	start_address = (ulong *)GETBUF(sizeof(ulong) *
	    vt->kmem_cache_len_nodes);

	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(kmem_cache_nodelists(cache), KVADDR, &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes,
	    "array nodelist array", RETURN_ON_ERROR)) {
		for (i = 0; i < vt->kmem_cache_len_nodes; i++) {
			if (start_address[i] == 0)
				continue;
			if (readmem(start_address[i] +
			    OFFSET(kmem_list3_shared), KVADDR, &shared,
			    sizeof(void *), "kmem_list3 shared",
			    RETURN_ON_ERROR|QUIET)) {
				if (!shared)
					break;
			} else
				continue;
			if (readmem(shared + OFFSET(array_cache_limit),
			    KVADDR, &limit, sizeof(int),
			    "shared array_cache limit",
			    RETURN_ON_ERROR|QUIET)) {
				if (CRASHDEBUG(3))
					fprintf(fp,
					    " shared node limit[%d]: %d\n",
					    i, limit);
				if ((unsigned int)limit > INT_MAX)
					error(INFO,
					    "kmem_cache: %lx: shared node limit[%d]: %d\n",
					    cache, i, limit);
				else if (limit > max_limit)
					max_limit = limit;
				break;
			}
		}
	}
	FREEBUF(start_address);
	return max_limit;

bail_out:
	vt->flags |= KMEM_CACHE_UNAVAIL;
	error(INFO, "unable to initialize kmem slab cache subsystem\n\n");
	*cpus = 0;
	return 0;
}

/*
 * Determine whether the current slab cache is contained in
 * the comma-separated list from a "kmem -I list1,list2 ..."
 * command entry.
 */
static int
ignore_cache(struct meminfo *si, char *name)
{
	int i, argc;
	char *p1;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];

	if (!si->ignore)
		return FALSE;

	strcpy(buf, si->ignore);

	/* turn commas into spaces so parse_line() can tokenize */
	p1 = buf;
	while (*p1) {
		if (*p1 == ',')
			*p1 = ' ';
		p1++;
	}

	argc = parse_line(buf, arglist);

	for (i = 0; i < argc; i++) {
		if (STREQ(name, arglist[i]))
			return TRUE;
	}

	return FALSE;
}

/*
 * dump_kmem_cache() displays basic information about kmalloc() slabs.
 * At this point, only kmem_cache_s structure data for each slab is dumped.
 *
 * TBD: Given a specified physical address, and determine which slab it came
 * from, and whether it's in use or not.
/* Magic values stamped into v1 kmem_cache_s / kmem_slab_s structures. */
#define SLAB_C_MAGIC            0x4F17A36DUL
#define SLAB_MAGIC_ALLOC        0xA5C32F2BUL    /* slab is alive */
#define SLAB_MAGIC_DESTROYED    0xB2F23C5AUL    /* slab has been destroyed */
#define SLAB_CFLGS_BUFCTL       0x020000UL      /* bufctls in own cache */

/* si->found classification codes for an address located in the slab subsystem */
#define KMEM_SLAB_ADDR          (1)
#define KMEM_BUFCTL_ADDR        (2)
#define KMEM_OBJECT_ADDR_FREE   (3)
#define KMEM_OBJECT_ADDR_INUSE  (4)
#define KMEM_OBJECT_ADDR_CACHED (5)
#define KMEM_ON_SLAB            (6)
#define KMEM_OBJECT_ADDR_SHARED (7)
#define KMEM_SLAB_OVERLOAD_PAGE (8)
#define KMEM_SLAB_FREELIST      (9)

/*
 * Print one "kmem -s" summary line for a v1-format cache; for
 * PERCPU_KMALLOC_V1 the per-cpu cached objects are subtracted from the
 * in-use count.
 */
#define DUMP_KMEM_CACHE_INFO_V1() \
      { \
	char b1[BUFSIZE]; \
	fprintf(fp, "%s %-18s %8ld ", \
		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \
		buf, si->size); \
	fprintf(fp, "%9ld %8ld %5ld %3ldk\n", \
		vt->flags & PERCPU_KMALLOC_V1 ? \
		si->inuse - si->cpucached_cache : \
		si->inuse, si->num_slabs * si->c_num, \
		si->num_slabs, si->slabsize/1024); \
      }

#define DUMP_KMEM_CACHE_INFO_V2()  dump_kmem_cache_info_v2(si)

/*
 * Print one "kmem -s" summary line for a v2 (2.6-era) cache.
 *
 * The column layout is kept aligned even for cache names longer than the
 * 18-character field: the numeric field width for the in-use count is
 * rebuilt on the fly into b1 ("%9ld" or narrower) depending on how much
 * of the name/size columns was consumed.
 */
static void
dump_kmem_cache_info_v2(struct meminfo *si)
{
	char b1[BUFSIZE];
	char b2[BUFSIZE];
	int namelen, sizelen, spacelen;

	fprintf(fp, "%s ",
		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)));

	namelen = strlen(si->curname);
	sprintf(b2, "%ld", si->size);
	sizelen = strlen(b2);
	spacelen = 0;

	if (namelen++ > 18) {
		/* name overflows its column: pad with at least one space */
		spacelen = 29 - namelen - sizelen;
		fprintf(fp, "%s%s%ld ", si->curname,
			space(spacelen <= 0 ? 1 : spacelen), si->size);
		if (spacelen > 0)
			spacelen = 1;
		/* build the in-use format ("%Nld") with a reduced width */
		sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1);
	} else {
		fprintf(fp, "%-18s %8ld ", si->curname, si->size);
		sprintf(b1, "%c%dld ", '%', 9);
	}

	fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ?
		si->inuse - si->cpucached_cache : si->inuse);

	fprintf(fp, "%8ld %s%5ld %s%3ldk\n",
		si->num_slabs * si->c_num,
		si->num_slabs < 100000 ? " " : "", si->num_slabs,
		(si->slabsize/1024) < 1000 ? " " : "", si->slabsize/1024);
}
" " : "", si->slabsize/1024); } #define DUMP_SLAB_INFO() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ ulong allocated, freeobjs, slab; \ if (vt->flags & SLAB_OVERLOAD_PAGE) \ slab = si->slab - OFFSET(page_lru); \ else \ slab = si->slab; \ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ allocated = si->s_inuse - si->cpucached_slab; \ freeobjs = si->c_num - allocated - si->cpucached_slab; \ } else { \ allocated = si->s_inuse; \ freeobjs = si->c_num - si->s_inuse; \ } \ fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ si->c_num, allocated, \ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \ freeobjs + si->cpucached_slab : freeobjs); \ } static void dump_kmem_cache(struct meminfo *si) { char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name, magic; int cnt; char *p1; if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_c_nextp), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; si->cache_buf = GETBUF(SIZE(kmem_cache_s)); do { if ((si->flags & VERBOSE) && !si->reqname && 
!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), buf, vt->kmem_cache_namelen); } else { name = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_name)); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); goto next_cache; } si->curname = buf; if (CRASHDEBUG(1)) fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); console("cache: %lx %s\n", si->cache, si->curname); magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); if (magic == SLAB_C_MAGIC) { si->size = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_org_size)); if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: c_org_size: %ld\n", si->curname, si->size); si->errors++; } } si->c_flags = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_flags)); si->c_offset = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_offset)); si->order = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_gfporder)); si->c_num = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_num)); do_slab_chain(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) DUMP_KMEM_CACHE_INFO_V1(); if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO_V1(); fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_slab_t)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; } break; } } } else { error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", si->curname, magic); si->errors++; } next_cache: si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); } while (si->cache != cache_cache); FREEBUF(si->cache_buf); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); } /* * dump_kmem_cache() adapted for newer percpu slab format. */ static void dump_kmem_cache_percpu_v1(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. 
/*
 * dump_kmem_cache() adapted for the PERCPU_KMALLOC_V1 slab format
 * (2.4-era per-cpu slab caches): walks the circular cache_cache list,
 * prints a summary per cache, and optionally walks each slab chain.
 */
static void
dump_kmem_cache_percpu_v1(struct meminfo *si)
{
	int i;
	char buf[BUFSIZE];
	char kbuf[BUFSIZE];
	char *reqname;
	ulong cache_cache;
	ulong name;
	int cnt;
	/*
	 * Used as temporary variable to read sizeof(int) and assigned to
	 * ulong variable.  We are doing this to mask the endian issue.
	 */
	uint tmp_val;
	char *p1;

	if (!(vt->flags & PERCPU_KMALLOC_V1))
		error(FATAL,
		   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n");

	si->found = si->retval = 0;
	reqname = NULL;

	if ((!(si->flags & VERBOSE) || si->reqname) &&
	    !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
		fprintf(fp, "%s", kmem_cache_hdr);

	/* scratch buffers for per-slab object lists and per-cpu caches */
	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));
	for (i = 0; i < vt->kmem_max_cpus; i++)
		si->cpudata[i] = (ulong *)
			GETBUF(vt->kmem_max_limit * sizeof(ulong));

	cnt = 0;

	if (si->flags & CACHE_SET) {
		readmem(si->cache+OFFSET(kmem_cache_s_next),
			KVADDR, &cache_cache, sizeof(ulong),
			"kmem_cache_s next", FAULT_ON_ERROR);
	} else
		si->cache = cache_cache = symbol_value("cache_cache");

	if (si->flags & ADDRESS_SPECIFIED) {
		if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) {
			/*
			 * NOTE(review): early return leaks addrlist,
			 * kmem_bufctl and cpudata[] GETBUF allocations --
			 * confirm pool reclamation on command completion.
			 */
			error(INFO,
			    "address is not allocated in slab subsystem: %lx\n",
				si->spec_addr);
			return;
		}
		/* NOTE(review): stray third vararg (si->reqname) below */
		if (si->reqname && (si->reqname != p1))
			error(INFO,
			    "ignoring pre-selected %s cache for address: %lx\n",
				si->reqname, si->spec_addr, si->reqname);
		reqname = p1;
	} else
		reqname = si->reqname;

	do {
		if ((si->flags & VERBOSE) && !si->reqname &&
		    !(si->flags & ADDRESS_SPECIFIED))
			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);

		/* cache name: embedded array vs. pointer to string */
		if (vt->kmem_cache_namelen) {
			readmem(si->cache+OFFSET(kmem_cache_s_name),
				KVADDR, buf, vt->kmem_cache_namelen,
				"name array", FAULT_ON_ERROR);
		} else {
			readmem(si->cache+OFFSET(kmem_cache_s_name),
				KVADDR, &name, sizeof(ulong),
				"name", FAULT_ON_ERROR);
			if (!read_string(name, buf, BUFSIZE-1)) {
				error(WARNING,
				    "cannot read kmem_cache_s.name string at %lx\n",
					name);
				sprintf(buf, "(unknown)");
			}
		}

		if (reqname && !STREQ(reqname, buf))
			goto next_cache;

		if (ignore_cache(si, buf)) {
			fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
			goto next_cache;
		}

		si->curname = buf;

		readmem(si->cache+OFFSET(kmem_cache_s_objsize),
			KVADDR, &tmp_val, sizeof(uint),
			"objsize", FAULT_ON_ERROR);
		si->size = (ulong)tmp_val;

		if (!si->size) {
			if (STREQ(si->curname, "kmem_cache"))
				si->size = SIZE(kmem_cache_s);
			else {
				error(INFO, "\"%s\" cache: objsize: %ld\n",
					si->curname, si->size);
				si->errors++;
			}
		}

		readmem(si->cache+OFFSET(kmem_cache_s_flags),
			KVADDR, &tmp_val, sizeof(uint),
			"kmem_cache_s flags", FAULT_ON_ERROR);
		si->c_flags = (ulong)tmp_val;

		readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
			KVADDR, &tmp_val, sizeof(uint),
			"gfporder", FAULT_ON_ERROR);
		si->order = (ulong)tmp_val;

		readmem(si->cache+OFFSET(kmem_cache_s_num),
			KVADDR, &tmp_val, sizeof(uint),
			"kmem_cache_s num", FAULT_ON_ERROR);
		si->c_num = (ulong)tmp_val;

		do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si);

		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
			DUMP_KMEM_CACHE_INFO_V1();
			if (CRASHDEBUG(3))
				dump_struct("kmem_cache_s", si->cache, 0);
		}

		if (si->flags == GET_SLAB_PAGES)
			si->retval += (si->num_slabs *
				(si->slabsize/PAGESIZE()));

		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
			gather_cpudata_list_v1(si);

			si->slab = (si->flags & ADDRESS_SPECIFIED) ?
				vaddr_to_slab(si->spec_addr) : 0;

			do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si);

			if (si->found) {
				fprintf(fp, "%s", kmem_cache_hdr);
				DUMP_KMEM_CACHE_INFO_V1();
				fprintf(fp, "%s", slab_hdr);
				gather_slab_cached_count(si);
				DUMP_SLAB_INFO();

				/* classify what spec_addr hit */
				switch (si->found)
				{
				case KMEM_BUFCTL_ADDR:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp,"(kmem_bufctl_t)\n");
					break;

				case KMEM_SLAB_ADDR:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp, "(slab_s)\n");
					break;

				case KMEM_ON_SLAB:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp, "(unused part of slab)\n");
					break;

				case KMEM_OBJECT_ADDR_FREE:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " %lx\n",
						si->container ? si->container :
						(ulong)si->spec_addr);
					break;

				case KMEM_OBJECT_ADDR_INUSE:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " [%lx]\n",
						si->container ? si->container :
						(ulong)si->spec_addr);
					break;

				case KMEM_OBJECT_ADDR_CACHED:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " %lx (cpu %d cache)\n",
						si->container ? si->container :
						(ulong)si->spec_addr, si->cpu);
					break;
				}

				break;
			}
		}

next_cache:
		/* list_head is embedded; back up to the cache base address */
		readmem(si->cache+OFFSET(kmem_cache_s_next),
			KVADDR, &si->cache, sizeof(ulong),
			"kmem_cache_s next", FAULT_ON_ERROR);
		si->cache -= OFFSET(kmem_cache_s_next);

	} while (si->cache != cache_cache);

	if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
		error(INFO, "%s: address not found in cache: %lx\n",
			reqname, si->spec_addr);

	if (si->errors)
		error(INFO, "%ld error%s encountered\n",
			si->errors, si->errors > 1 ? "s" : "");

	FREEBUF(si->addrlist);
	FREEBUF(si->kmem_bufctl);
	for (i = 0; i < vt->kmem_max_cpus; i++)
		FREEBUF(si->cpudata[i]);
}
/*
 * Updated for 2.6 slab substructure (PERCPU_KMALLOC_V2): walks the
 * slab_caches/cache_chain list, prints a summary per cache, and
 * optionally walks each cache's slab chains (per-node and/or
 * page-overloaded variants).
 */
static void
dump_kmem_cache_percpu_v2(struct meminfo *si)
{
	int i;
	char buf[BUFSIZE];
	char kbuf[BUFSIZE];
	char *reqname;
	ulong cache_end;
	ulong name, page_head;
	int cnt;
	/*
	 * Used as temporary variable to read sizeof(int) and assigned to
	 * ulong variable.  We are doing this to mask the endian issue.
	 */
	uint tmp_val;
	char *p1;

	if (!(vt->flags & PERCPU_KMALLOC_V2))
		error(FATAL,
		   "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n");

	si->found = si->retval = 0;
	reqname = NULL;

	if ((!(si->flags & VERBOSE) || si->reqname) &&
	    !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES)))
		fprintf(fp, "%s", kmem_cache_hdr);

	/* scratch buffers: object lists, freelist, per-cpu & shared caches */
	si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong));
	si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int));

	if (vt->flags & SLAB_OVERLOAD_PAGE) {
		si->freelist = si->kmem_bufctl;
		si->freelist_index_size = slab_freelist_index_size();
	}
	for (i = 0; i < vt->kmem_max_cpus; i++)
		si->cpudata[i] = (ulong *)
			GETBUF(vt->kmem_max_limit * sizeof(ulong));
	if(vt->flags & PERCPU_KMALLOC_V2_NODES)
		si->shared_array_cache = (ulong *)
			GETBUF(vt->kmem_cache_len_nodes *
			(vt->kmem_max_limit+1) * sizeof(ulong));
	else
		si->shared_array_cache = (ulong *)
			GETBUF((vt->kmem_max_limit+1) * sizeof(ulong));

	cnt = 0;

	if (si->flags & CACHE_SET)
		readmem(si->cache+OFFSET(kmem_cache_s_next),
			KVADDR, &cache_end, sizeof(ulong),
			"kmem_cache_s next", FAULT_ON_ERROR);
	else {
		/* list anchor differs by kernel version */
		if (vt->flags & KMALLOC_COMMON) {
			get_symbol_data("slab_caches", sizeof(ulong),
				&si->cache);
			si->cache -= OFFSET(kmem_cache_s_next);
			cache_end = symbol_value("slab_caches");
		} else {
			get_symbol_data("cache_chain", sizeof(ulong),
				&si->cache);
			si->cache -= OFFSET(kmem_cache_s_next);
			cache_end = symbol_value("cache_chain");
		}
	}

	if (si->flags & ADDRESS_SPECIFIED) {
		if ((p1 = is_slab_overload_page(si->spec_addr,
		    &page_head, kbuf))) {
			si->flags |= SLAB_OVERLOAD_PAGE_PTR;
			si->spec_addr = page_head;
		} else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr,
		    kbuf, VERBOSE))) {
			/*
			 * NOTE(review): early return leaks all of the
			 * GETBUF allocations above -- confirm pool
			 * reclamation on command completion.
			 */
			error(INFO,
			    "address is not allocated in slab subsystem: %lx\n",
				si->spec_addr);
			return;
		}
		/* NOTE(review): stray third vararg (si->reqname) below */
		if (si->reqname && (si->reqname != p1))
			error(INFO,
			    "ignoring pre-selected %s cache for address: %lx\n",
				si->reqname, si->spec_addr, si->reqname);
		reqname = p1;
	} else
		reqname = si->reqname;

	do {
		if ((si->flags & VERBOSE) && !si->reqname &&
		    !(si->flags & ADDRESS_SPECIFIED))
			fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr);

		/* cache name: embedded array vs. pointer to string */
		if (vt->kmem_cache_namelen) {
			readmem(si->cache+OFFSET(kmem_cache_s_name),
				KVADDR, buf, vt->kmem_cache_namelen,
				"name array", FAULT_ON_ERROR);
		} else {
			readmem(si->cache+OFFSET(kmem_cache_s_name),
				KVADDR, &name, sizeof(ulong),
				"name", FAULT_ON_ERROR);
			if (!read_string(name, buf, BUFSIZE-1)) {
				error(WARNING,
				    "cannot read kmem_cache_s.name string at %lx\n",
					name);
				sprintf(buf, "(unknown)");
			}
		}

		if (reqname && !STREQ(reqname, buf))
			goto next_cache;

		if (ignore_cache(si, buf)) {
			fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
			goto next_cache;
		}

		if (bad_slab_cache(si->cache)) {
			fprintf(fp, "%lx %-18s [INVALID/CORRUPTED]\n",
				si->cache, buf);
			goto next_cache;
		}

		si->curname = buf;

		readmem(si->cache+OFFSET(kmem_cache_s_objsize),
			KVADDR, &tmp_val, sizeof(uint),
			"objsize", FAULT_ON_ERROR);
		si->size = (ulong)tmp_val;

		if (!si->size) {
			if (STREQ(si->curname, "kmem_cache"))
				si->size = SIZE(kmem_cache_s);
			else {
				error(INFO, "\"%s\" cache: objsize: %ld\n",
					si->curname, si->size);
				si->errors++;
			}
		}

		readmem(si->cache+OFFSET(kmem_cache_s_flags),
			KVADDR, &tmp_val, sizeof(uint),
			"kmem_cache_s flags", FAULT_ON_ERROR);
		si->c_flags = (ulong)tmp_val;

		readmem(si->cache+OFFSET(kmem_cache_s_gfporder),
			KVADDR, &tmp_val, sizeof(uint),
			"gfporder", FAULT_ON_ERROR);
		si->order = (ulong)tmp_val;

		readmem(si->cache+OFFSET(kmem_cache_s_num),
			KVADDR, &tmp_val, sizeof(uint),
			"kmem_cache_s num", FAULT_ON_ERROR);
		si->c_num = (ulong)tmp_val;

		/* pick the chain walker matching the kernel's slab layout */
		if (vt->flags & PERCPU_KMALLOC_V2_NODES) {
			if (vt->flags & SLAB_OVERLOAD_PAGE)
				do_slab_chain_slab_overload_page(SLAB_GET_COUNTS, si);
			else
				do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si);
		} else
			do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si);

		if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) {
			DUMP_KMEM_CACHE_INFO_V2();
			if (CRASHDEBUG(3))
				dump_struct("kmem_cache_s", si->cache, 0);
		}

		if (si->flags == GET_SLAB_PAGES)
			si->retval += (si->num_slabs *
				(si->slabsize/PAGESIZE()));

		if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) {
			if (!(vt->flags & PERCPU_KMALLOC_V2_NODES))
				gather_cpudata_list_v2(si);

			si->slab = (si->flags & ADDRESS_SPECIFIED) ?
				vaddr_to_slab(si->spec_addr) : 0;

			if (vt->flags & PERCPU_KMALLOC_V2_NODES) {
				if (vt->flags & SLAB_OVERLOAD_PAGE)
					do_slab_chain_slab_overload_page(SLAB_WALKTHROUGH, si);
				else
					do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si);
			} else
				do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si);

			if (si->found) {
				fprintf(fp, "%s", kmem_cache_hdr);
				DUMP_KMEM_CACHE_INFO_V2();
				fprintf(fp, "%s", slab_hdr);
				gather_slab_cached_count(si);
				DUMP_SLAB_INFO();

				/* classify what spec_addr hit */
				switch (si->found)
				{
				case KMEM_BUFCTL_ADDR:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp,"(kmem_bufctl_t)\n");
					break;

				case KMEM_SLAB_ADDR:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp, "(slab)\n");
					break;

				case KMEM_ON_SLAB:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp, "(unused part of slab)\n");
					break;

				case KMEM_SLAB_FREELIST:
					fprintf(fp, " %lx ",
						(ulong)si->spec_addr);
					fprintf(fp, "(on-slab freelist)\n");
					break;

				case KMEM_SLAB_OVERLOAD_PAGE:
					/* dump the whole page's objects */
					si->flags &= ~ADDRESS_SPECIFIED;
					dump_slab_objects_percpu(si);
					si->flags |= ADDRESS_SPECIFIED;
					break;

				case KMEM_OBJECT_ADDR_FREE:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " %lx\n",
						si->container ? si->container :
						(ulong)si->spec_addr);
					break;

				case KMEM_OBJECT_ADDR_INUSE:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " [%lx]\n",
						si->container ? si->container :
						(ulong)si->spec_addr);
					break;

				case KMEM_OBJECT_ADDR_CACHED:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " %lx (cpu %d cache)\n",
						si->container ? si->container :
						(ulong)si->spec_addr, si->cpu);
					break;

				case KMEM_OBJECT_ADDR_SHARED:
					fprintf(fp, "%s", free_inuse_hdr);
					fprintf(fp, " %lx (shared cache)\n",
						si->container ? si->container :
						(ulong)si->spec_addr);
					break;
				}

				break;
			}
		}

next_cache:
		readmem(si->cache+OFFSET(kmem_cache_s_next),
			KVADDR, &si->cache, sizeof(ulong),
			"kmem_cache_s next", FAULT_ON_ERROR);
		/* don't adjust the anchor symbol itself */
		if (si->cache != cache_end)
			si->cache -= OFFSET(kmem_cache_s_next);

	} while (si->cache != cache_end);

	if ((si->flags & ADDRESS_SPECIFIED) && !si->found)
		error(INFO, "%s: address not found in cache: %lx\n",
			reqname, si->spec_addr);

	if (si->errors)
		error(INFO, "%ld error%s encountered\n",
			si->errors, si->errors > 1 ? "s" : "");

	FREEBUF(si->addrlist);
	FREEBUF(si->kmem_bufctl);
	for (i = 0; i < vt->kmem_max_cpus; i++)
		FREEBUF(si->cpudata[i]);
	FREEBUF(si->shared_array_cache);
}
OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); save_slab_data(si); break; case SLAB_WALKTHROUGH: if (!si->slab) si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (si->slab == kmem_slab_end) return; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); dump_slab(si); if (si->found) { FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); break; } } /* * do_slab_chain() adapted for newer percpu slab format. */ #define SLAB_BASE(X) (PTOB(BTOP(X))) #define INSLAB_PERCPU(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) #define SLAB_CHAINS (3) static char *slab_chain_name_v1[] = {"full", "partial", "free"}; static void do_slab_chain_percpu_v1(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_s_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; if (VALID_MEMBER(kmem_cache_s_slabs)) { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); slab_chains[1] = 0; slab_chains[2] = 0; } else { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); } if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= 
/*
 * do_slab_chain() adapted for newer percpu slab format.
 */

/* page-align a slab's object-area address */
#define SLAB_BASE(X) (PTOB(BTOP(X)))

#define INSLAB_PERCPU(obj, si) \
	((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem))

#define SLAB_CHAINS (3)

static char *slab_chain_name_v1[] = {"full", "partial", "free"};

/*
 * Walk the slab_s chains (either a single "slabs" list, or the
 * full/partial/free triple) of a PERCPU_KMALLOC_V1 cache.
 *
 *  cmd: SLAB_GET_COUNTS  -- accumulate si->inuse / si->num_slabs
 *       SLAB_WALKTHROUGH -- dump each slab via dump_slab_percpu_v1()
 *
 * Each list entry is verified with verify_slab_v1() before use; a bad
 * entry marks the list "borked" and terminates that chain's walk.
 */
static void
do_slab_chain_percpu_v1(long cmd, struct meminfo *si)
{
	int i, tmp, s;
	int list_borked;
	char *slab_s_buf;
	ulong specified_slab;
	ulong last;
	ulong slab_chains[SLAB_CHAINS];

	list_borked = 0;
	si->slabsize = (power(2, si->order) * PAGESIZE());
	si->cpucached_slab = 0;

	if (VALID_MEMBER(kmem_cache_s_slabs)) {
		/* older layout: one combined slab list */
		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs);
		slab_chains[1] = 0;
		slab_chains[2] = 0;
	} else {
		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full);
		slab_chains[1] = si->cache +
			OFFSET(kmem_cache_s_slabs_partial);
		slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free);
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
		fprintf(fp, "full: %lx partial: %lx free: %lx ]\n",
			slab_chains[0], slab_chains[1], slab_chains[2]);
	}

	switch (cmd)
	{
	case SLAB_GET_COUNTS:
		si->flags |= SLAB_GET_COUNTS;
		si->flags &= ~SLAB_WALKTHROUGH;
		si->cpucached_cache = 0;
		si->num_slabs = si->inuse = 0;
		gather_cpudata_list_v1(si);

		slab_s_buf = GETBUF(SIZE(slab_s));

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!readmem(slab_chains[s],
			    KVADDR, &si->slab, sizeof(ulong),
			    "first slab", QUIET|RETURN_ON_ERROR)) {
				error(INFO,
				    "%s: %s list: bad slab pointer: %lx\n",
					si->curname, slab_chain_name_v1[s],
					slab_chains[s]);
				list_borked = 1;
				continue;
			}

			if (slab_data_saved(si)) {
				FREEBUF(slab_s_buf);
				return;
			}

			if (si->slab == slab_chains[s])
				continue;	/* empty list */

			last = slab_chains[s];

			do {
				if (received_SIGINT()) {
					FREEBUF(slab_s_buf);
					restart(0);
				}

				if (!verify_slab_v1(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_s_list);

				readmem(si->slab, KVADDR, slab_s_buf,
					SIZE(slab_s), "slab_s buffer",
					FAULT_ON_ERROR);
				tmp = INT(slab_s_buf + OFFSET(slab_s_inuse));
				si->inuse += tmp;

				if (ACTIVE())
					gather_cpudata_list_v1(si);
				si->s_mem = ULONG(slab_s_buf +
					OFFSET(slab_s_s_mem));
				gather_slab_cached_count(si);

				si->num_slabs++;

				si->slab = ULONG(slab_s_buf +
					OFFSET(slab_s_list));
				si->slab -= OFFSET(slab_s_list);

				/*
				 * Check for slab transition. (Tony Dziedzic)
				 */
				for (i = 0; i < SLAB_CHAINS; i++) {
					if ((i != s) &&
					    (si->slab == slab_chains[i])) {
						error(NOTE,
						    "%s: slab chain inconsistency: %s list\n",
							si->curname,
							slab_chain_name_v1[s]);
						list_borked = 1;
					}
				}

			} while (si->slab != slab_chains[s] && !list_borked);
		}

		FREEBUF(slab_s_buf);
		if (!list_borked)
			save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		specified_slab = si->slab;
		si->flags |= SLAB_WALKTHROUGH;
		si->flags &= ~SLAB_GET_COUNTS;

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!specified_slab) {
				if (!readmem(slab_chains[s],
				    KVADDR, &si->slab, sizeof(ulong),
				    "slabs", QUIET|RETURN_ON_ERROR)) {
					error(INFO,
					    "%s: %s list: bad slab pointer: %lx\n",
						si->curname,
						slab_chain_name_v1[s],
						slab_chains[s]);
					list_borked = 1;
					continue;
				}
				last = slab_chains[s];
			} else
				last = 0;	/* skip prev-pointer check */

			if (si->slab == slab_chains[s])
				continue;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "search cache: [%s] ",
					si->curname);
				if (si->flags & ADDRESS_SPECIFIED)
					fprintf(fp, "for %llx",
						si->spec_addr);
				fprintf(fp, "\n");
			}

			do {
				if (received_SIGINT())
					restart(0);

				if (!verify_slab_v1(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_s_list);

				dump_slab_percpu_v1(si);

				if (si->found) {
					return;
				}

				readmem(si->slab+OFFSET(slab_s_list),
					KVADDR, &si->slab, sizeof(ulong),
					"slab list", FAULT_ON_ERROR);
				si->slab -= OFFSET(slab_s_list);

			} while (si->slab != slab_chains[s] && !list_borked);
		}
		break;
	}
}
*/ static int verify_slab_v1(struct meminfo *si, ulong last, int s) { char slab_s_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v1[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *) (slab_s_buf + OFFSET(slab_s_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v1; switch (s) { case 0: /* full -- but can be one singular list */ if (VALID_MEMBER(kmem_cache_s_slabs_full) && (inuse != si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v1: s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? 
/*
 * Updated for 2.6 slab substructure.
 */
static char *slab_chain_name_v2[] = {"partial", "full", "free"};

/*
 * Walk the partial/full/free slab lists hanging off the cache-embedded
 * kmem_list3 of a PERCPU_KMALLOC_V2 (non-NUMA) cache.
 *
 *  cmd: SLAB_GET_COUNTS  -- accumulate si->inuse / si->num_slabs
 *       SLAB_WALKTHROUGH -- dump each slab via dump_slab_percpu_v2()
 *
 * Each entry is verified with verify_slab_v2() first; a bad entry marks
 * the list "borked" and terminates that chain's walk.
 */
static void
do_slab_chain_percpu_v2(long cmd, struct meminfo *si)
{
	int i, tmp, s;
	int list_borked;
	char *slab_buf;
	ulong specified_slab;
	ulong last;
	ulong slab_chains[SLAB_CHAINS];

	list_borked = 0;
	si->slabsize = (power(2, si->order) * PAGESIZE());
	si->cpucached_slab = 0;

	slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) +
		OFFSET(kmem_list3_slabs_partial);
	slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) +
		OFFSET(kmem_list3_slabs_full);
	slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) +
		OFFSET(kmem_list3_slabs_free);

	if (CRASHDEBUG(1)) {
		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
		fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
			slab_chains[0], slab_chains[1], slab_chains[2]);
	}

	switch (cmd)
	{
	case SLAB_GET_COUNTS:
		si->flags |= SLAB_GET_COUNTS;
		si->flags &= ~SLAB_WALKTHROUGH;
		si->cpucached_cache = 0;
		si->num_slabs = si->inuse = 0;
		gather_cpudata_list_v2(si);

		slab_buf = GETBUF(SIZE(slab));

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!readmem(slab_chains[s],
			    KVADDR, &si->slab, sizeof(ulong),
			    "first slab", QUIET|RETURN_ON_ERROR)) {
				error(INFO,
				    "%s: %s list: bad slab pointer: %lx\n",
					si->curname, slab_chain_name_v2[s],
					slab_chains[s]);
				list_borked = 1;
				continue;
			}

			if (slab_data_saved(si)) {
				FREEBUF(slab_buf);
				return;
			}

			if (si->slab == slab_chains[s])
				continue;	/* empty list */

			last = slab_chains[s];

			do {
				if (received_SIGINT()) {
					FREEBUF(slab_buf);
					restart(0);
				}

				if (!verify_slab_v2(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_list);

				readmem(si->slab, KVADDR, slab_buf,
					SIZE(slab), "slab buffer",
					FAULT_ON_ERROR);
				tmp = INT(slab_buf + OFFSET(slab_inuse));
				si->inuse += tmp;

				if (ACTIVE())
					gather_cpudata_list_v2(si);
				si->s_mem = ULONG(slab_buf +
					OFFSET(slab_s_mem));
				gather_slab_cached_count(si);

				si->num_slabs++;

				si->slab = ULONG(slab_buf +
					OFFSET(slab_list));
				si->slab -= OFFSET(slab_list);

				/*
				 * Check for slab transition. (Tony Dziedzic)
				 */
				for (i = 0; i < SLAB_CHAINS; i++) {
					if ((i != s) &&
					    (si->slab == slab_chains[i])) {
						error(NOTE,
						    "%s: slab chain inconsistency: %s list\n",
							si->curname,
							slab_chain_name_v2[s]);
						list_borked = 1;
					}
				}

			} while (si->slab != slab_chains[s] && !list_borked);
		}

		FREEBUF(slab_buf);
		if (!list_borked)
			save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		specified_slab = si->slab;
		si->flags |= SLAB_WALKTHROUGH;
		si->flags &= ~SLAB_GET_COUNTS;

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!specified_slab) {
				if (!readmem(slab_chains[s],
				    KVADDR, &si->slab, sizeof(ulong),
				    "slabs", QUIET|RETURN_ON_ERROR)) {
					error(INFO,
					    "%s: %s list: bad slab pointer: %lx\n",
						si->curname,
						slab_chain_name_v2[s],
						slab_chains[s]);
					list_borked = 1;
					continue;
				}
				last = slab_chains[s];
			} else
				last = 0;	/* skip prev-pointer check */

			if (si->slab == slab_chains[s])
				continue;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "search cache: [%s] ",
					si->curname);
				if (si->flags & ADDRESS_SPECIFIED)
					fprintf(fp, "for %llx",
						si->spec_addr);
				fprintf(fp, "\n");
			}

			do {
				if (received_SIGINT())
					restart(0);

				if (!verify_slab_v2(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_list);

				dump_slab_percpu_v2(si);

				if (si->found) {
					return;
				}

				readmem(si->slab+OFFSET(slab_list),
					KVADDR, &si->slab, sizeof(ulong),
					"slab list", FAULT_ON_ERROR);
				si->slab -= OFFSET(slab_list);

			} while (si->slab != slab_chains[s] && !list_borked);
		}
		break;
	}
}
/*
 * Added To Traverse the Nodelists: per-NUMA-node variant of
 * do_slab_chain_percpu_v2().  Reads the cache's nodelists[] array and
 * walks the partial/full/free lists of each online node's kmem_list3.
 *
 *  cmd: SLAB_GET_COUNTS  -- accumulate si->inuse / si->num_slabs
 *       SLAB_WALKTHROUGH -- dump each slab via dump_slab_percpu_v2()
 */
static void
do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si)
{
	int i, tmp, s, node;
	int list_borked;
	char *slab_buf;
	ulong specified_slab;
	ulong last;
	ulong slab_chains[SLAB_CHAINS];
	ulong *start_address;
	int index;

	list_borked = 0;
	slab_buf = NULL;
	si->slabsize = (power(2, si->order) * PAGESIZE());
	si->cpucached_slab = 0;
	start_address = (ulong *)GETBUF(sizeof(ulong) *
		vt->kmem_cache_len_nodes);

	if (!readmem(kmem_cache_nodelists(si->cache), KVADDR,
	    &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes,
	    "array nodelist array", RETURN_ON_ERROR))
		error(INFO, "cannot read kmem_cache nodelists array");

	switch (cmd)
	{
	case SLAB_GET_COUNTS:
		si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE);
		si->flags &= ~SLAB_WALKTHROUGH;
		si->cpucached_cache = 0;
		si->num_slabs = si->inuse = 0;
		slab_buf = GETBUF(SIZE(slab));
		for (index = 0; (index < vt->kmem_cache_len_nodes); index++) {
			/* skip node slots that are offline or empty */
			if (vt->flags & NODES_ONLINE) {
				node = next_online_node(index);
				if (node < 0)
					break;
				if (node != index)
					continue;
			}
			if (start_address[index] == 0)
				continue;
			slab_chains[0] = start_address[index] +
				OFFSET(kmem_list3_slabs_partial);
			slab_chains[1] = start_address[index] +
				OFFSET(kmem_list3_slabs_full);
			slab_chains[2] = start_address[index] +
				OFFSET(kmem_list3_slabs_free);

			gather_cpudata_list_v2_nodes(si, index);

			si->flags &= ~SLAB_FIRST_NODE;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "[ %s: %lx ",
					si->curname, si->cache);
				fprintf(fp,
				    "partial: %lx full: %lx free: %lx ]\n",
					slab_chains[0], slab_chains[1],
					slab_chains[2]);
			}

			for (s = 0; s < SLAB_CHAINS; s++) {
				if (!slab_chains[s])
					continue;

				if (!readmem(slab_chains[s],
				    KVADDR, &si->slab, sizeof(ulong),
				    "first slab", QUIET|RETURN_ON_ERROR)) {
					error(INFO,
					    "%s: %s list: bad slab pointer: %lx\n",
						si->curname,
						slab_chain_name_v2[s],
						slab_chains[s]);
					list_borked = 1;
					continue;
				}

				if (slab_data_saved(si)) {
					FREEBUF(slab_buf);
					FREEBUF(start_address);
					return;
				}

				if (si->slab == slab_chains[s])
					continue;	/* empty list */

				last = slab_chains[s];

				do {
					if (received_SIGINT()) {
						FREEBUF(slab_buf);
						FREEBUF(start_address);
						restart(0);
					}

					if (!verify_slab_v2(si, last, s)) {
						list_borked = 1;
						continue;
					}
					last = si->slab - OFFSET(slab_list);

					readmem(si->slab, KVADDR, slab_buf,
						SIZE(slab), "slab buffer",
						FAULT_ON_ERROR);
					tmp = INT(slab_buf +
						OFFSET(slab_inuse));
					si->inuse += tmp;

					si->s_mem = ULONG(slab_buf +
						OFFSET(slab_s_mem));
					gather_slab_cached_count(si);

					si->num_slabs++;

					si->slab = ULONG(slab_buf +
						OFFSET(slab_list));
					si->slab -= OFFSET(slab_list);

					/*
					 * Check for slab transition.
					 * (Tony Dziedzic)
					 */
					for (i = 0; i < SLAB_CHAINS; i++) {
						if ((i != s) &&
						    (si->slab ==
						    slab_chains[i])) {
							error(NOTE,
							    "%s: slab chain inconsistency: %s list\n",
								si->curname,
								slab_chain_name_v2[s]);
							list_borked = 1;
						}
					}

				} while (si->slab != slab_chains[s] &&
					!list_borked);
			}
		}

		if (!list_borked)
			save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		specified_slab = si->slab;
		si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE);
		si->flags &= ~SLAB_GET_COUNTS;
		slab_buf = GETBUF(SIZE(slab));
		for (index = 0; (index < vt->kmem_cache_len_nodes); index++) {
			if (vt->flags & NODES_ONLINE) {
				node = next_online_node(index);
				if (node < 0)
					break;
				if (node != index)
					continue;
			}
			if (start_address[index] == 0)
				continue;
			slab_chains[0] = start_address[index] +
				OFFSET(kmem_list3_slabs_partial);
			slab_chains[1] = start_address[index] +
				OFFSET(kmem_list3_slabs_full);
			slab_chains[2] = start_address[index] +
				OFFSET(kmem_list3_slabs_free);

			gather_cpudata_list_v2_nodes(si, index);

			si->flags &= ~SLAB_FIRST_NODE;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "[ %s: %lx ",
					si->curname, si->cache);
				fprintf(fp,
				    "partial: %lx full: %lx free: %lx ]\n",
					slab_chains[0], slab_chains[1],
					slab_chains[2]);
			}

			for (s = 0; s < SLAB_CHAINS; s++) {
				if (!slab_chains[s])
					continue;

				if (!specified_slab) {
					if (!readmem(slab_chains[s],
					    KVADDR, &si->slab, sizeof(ulong),
					    "slabs", QUIET|RETURN_ON_ERROR)) {
						error(INFO, "%s: %s list: "
						    "bad slab pointer: %lx\n",
							si->curname,
							slab_chain_name_v2[s],
							slab_chains[s]);
						list_borked = 1;
						continue;
					}
					last = slab_chains[s];
				} else
					last = 0;

				if (si->slab == slab_chains[s])
					continue;

				readmem(si->slab, KVADDR, slab_buf,
					SIZE(slab), "slab buffer",
					FAULT_ON_ERROR);
				si->s_mem = ULONG(slab_buf +
					OFFSET(slab_s_mem));

				if (CRASHDEBUG(1)) {
					fprintf(fp, "search cache: [%s] ",
						si->curname);
					if (si->flags & ADDRESS_SPECIFIED)
						fprintf(fp, "for %llx",
							si->spec_addr);
					fprintf(fp, "\n");
				}

				do {
					if (received_SIGINT()) {
						FREEBUF(start_address);
						FREEBUF(slab_buf);
						restart(0);
					}

					if (!verify_slab_v2(si, last, s)) {
						list_borked = 1;
						continue;
					}
					last = si->slab - OFFSET(slab_list);

					dump_slab_percpu_v2(si);

					if (si->found) {
						FREEBUF(start_address);
						FREEBUF(slab_buf);
						return;
					}

					readmem(si->slab+OFFSET(slab_list),
						KVADDR, &si->slab,
						sizeof(ulong), "slab list",
						FAULT_ON_ERROR);
					si->slab -= OFFSET(slab_list);

				} while (si->slab != slab_chains[s] &&
					!list_borked);
			}
		}
		break;
	}

	FREEBUF(slab_buf);
	FREEBUF(start_address);
}
list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { FREEBUF(start_address); FREEBUF(slab_buf); return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(slab_buf); FREEBUF(start_address); } static int slab_freelist_index_size(void) { struct datatype_member datatype, *dm; dm = &datatype; BZERO(dm, sizeof(*dm)); dm->name = "freelist_idx_t"; if (is_typedef(dm->name)) return DATATYPE_SIZE(dm); if (CRASHDEBUG(1)) error(INFO, "freelist_idx_t does not exist\n"); return sizeof(int); } static void do_slab_chain_slab_overload_page(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *page_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; page_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, 
index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(page_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(page_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); tmp = INT(page_buf + OFFSET(page_active)); si->inuse += tmp; si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(page_buf + OFFSET(page_lru)); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: if (si->flags & SLAB_OVERLOAD_PAGE_PTR) { specified_slab = si->spec_addr; si->slab = si->spec_addr + OFFSET(page_lru); } else { specified_slab = si->slab; if (si->slab) si->slab += OFFSET(page_lru); } si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & 
ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(page_buf); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; dump_slab_overload_page(si); if (si->found) { FREEBUF(start_address); FREEBUF(page_buf); return; } readmem(si->slab, KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(page_buf); FREEBUF(start_address); } /* * Try to preclude any attempt to translate a bogus slab structure. */ static int verify_slab_v2(struct meminfo *si, ulong last, int s) { char slab_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_buf + OFFSET(slab_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* full */ if (inuse != si->c_num) { error(INFO, "%s: %s list: slab: %lx bad 
inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? FALSE : TRUE); } static int verify_slab_overload_page(struct meminfo *si, ulong last, int s) { char *page_buf; struct kernel_list_head *list_head; unsigned int active; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; page_buf = GETBUF(SIZE(page)); errcnt = 0; if (!readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); FREEBUF(page_buf); return FALSE; } list_head = (struct kernel_list_head *)(page_buf + OFFSET(page_lru)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: page/slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: page/slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } active = UINT(page_buf + OFFSET(page_active)); if (active > si->c_num) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((active == 0) || (active == si->c_num)) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 1: /* full */ if (active != si->c_num) { error(INFO, "%s: %s list: page/slab: %lx 
bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 2: /* free */ if (active > 0) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: page/slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; FREEBUF(page_buf); return(errcnt ? FALSE : TRUE); } /* * If it's a dumpfile, save the essential slab data to avoid re-reading * the whole slab chain more than once. This may seem like overkill, but * if the problem is a memory leak, or just the over-use of the buffer_head * cache, it's painful to wait each time subsequent kmem -s or -i commands * simply need the basic slab counts. */ struct slab_data { ulong cache_addr; int num_slabs; int inuse; ulong cpucached_cache; }; #define NO_SLAB_DATA ((void *)(-1)) static void save_slab_data(struct meminfo *si) { int i; if (si->flags & SLAB_DATA_NOSAVE) { si->flags &= ~SLAB_DATA_NOSAVE; return; } if (ACTIVE()) return; if (vt->slab_data == NO_SLAB_DATA) return; if (!vt->slab_data) { if (!(vt->slab_data = (struct slab_data *) malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { error(INFO, "cannot malloc slab_data table"); vt->slab_data = NO_SLAB_DATA; return; } for (i = 0; i < vt->kmem_cache_count; i++) { vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; vt->slab_data[i].num_slabs = 0; vt->slab_data[i].inuse = 0; vt->slab_data[i].cpucached_cache = 0; } } for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) break; if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { vt->slab_data[i].cache_addr = si->cache; vt->slab_data[i].num_slabs = si->num_slabs; vt->slab_data[i].inuse = si->inuse; vt->slab_data[i].cpucached_cache = si->cpucached_cache; break; } } } static int 
slab_data_saved(struct meminfo *si)
{
	int i;

	/*
	 * Saved per-cache counts exist only for dumpfiles; on a live
	 * system the counts can change between commands, and the table
	 * may never have been allocated at all.
	 */
	if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
		return FALSE;

	/* Restore the previously-saved counts for this cache, if present. */
	for (i = 0; i < vt->kmem_cache_count; i++) {
		if (vt->slab_data[i].cache_addr == si->cache) {
			si->inuse = vt->slab_data[i].inuse;
			si->num_slabs = vt->slab_data[i].num_slabs;
			si->cpucached_cache = vt->slab_data[i].cpucached_cache;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Display the saved slab_data table, one line per cache whose counts
 * have been stored by save_slab_data().
 */
static void
dump_saved_slab_data(void)
{
	int i;

	if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
		return;

	for (i = 0; i < vt->kmem_cache_count; i++) {
		/* entries are filled in order; the first unused slot ends the table */
		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA)
			break;

		fprintf(fp,
		    " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n",
			vt->slab_data[i].cache_addr,
			vt->slab_data[i].inuse,
			vt->slab_data[i].num_slabs,
			vt->slab_data[i].cpucached_cache);
	}
}

/*
 * Dump the contents of a kmem slab.
 */
static void
dump_slab(struct meminfo *si)
{
	si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem));
	/* page-align the object base address */
	si->s_mem = PTOB(BTOP(si->s_mem));

	if (si->flags & ADDRESS_SPECIFIED) {
		/* Is the requested address within the slab structure itself? */
		if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) {
			si->found = KMEM_SLAB_ADDR;
			return;
		}
		if (INSLAB(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep));
	si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse));
	si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index));

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects(si);
}

/*
 * dump_slab() adapted for newer percpu slab format.
 */
static void
dump_slab_percpu_v1(struct meminfo *si)
{
	int tmp;

	readmem(si->slab+OFFSET(slab_s_s_mem),
		KVADDR, &si->s_mem, sizeof(ulong),
		"s_mem", FAULT_ON_ERROR);

	/*
	 * Include the array of kmem_bufctl_t's appended to slab.
*/ tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB_PERCPU(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+tmp))) { if (si->spec_addr >= (si->slab + SIZE(slab_s))) si->found = KMEM_BUFCTL_ADDR; else si->found = KMEM_SLAB_ADDR; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } readmem(si->slab+OFFSET(slab_s_inuse), KVADDR, &tmp, sizeof(int), "inuse", FAULT_ON_ERROR); si->s_inuse = tmp; readmem(si->slab+OFFSET(slab_s_free), KVADDR, &si->free, SIZE(kmem_bufctl_t), "kmem_bufctl_t", FAULT_ON_ERROR); gather_slab_free_list_percpu(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } /* * Updated for 2.6 slab substructure. */ static void dump_slab_percpu_v2(struct meminfo *si) { int tmp; readmem(si->slab+OFFSET(slab_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "s_mem", FAULT_ON_ERROR); /* * Include the array of kmem_bufctl_t's appended to slab. */ tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB_PERCPU(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+tmp))) { if (si->spec_addr >= (si->slab + SIZE(slab))) si->found = KMEM_BUFCTL_ADDR; else si->found = KMEM_SLAB_ADDR; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... 
*/ else return; } readmem(si->slab+OFFSET(slab_inuse), KVADDR, &tmp, sizeof(int), "inuse", FAULT_ON_ERROR); si->s_inuse = tmp; readmem(si->slab+OFFSET(slab_free), KVADDR, &si->free, SIZE(kmem_bufctl_t), "kmem_bufctl_t", FAULT_ON_ERROR); gather_slab_free_list_percpu(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } static void dump_slab_overload_page(struct meminfo *si) { int tmp; ulong slab_overload_page, freelist; slab_overload_page = si->slab - OFFSET(page_lru); readmem(slab_overload_page + OFFSET(page_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "page.s_mem", FAULT_ON_ERROR); readmem(slab_overload_page + OFFSET(page_freelist), KVADDR, &freelist, sizeof(ulong), "page.freelist", FAULT_ON_ERROR); if (si->flags & ADDRESS_SPECIFIED) { if ((si->spec_addr >= slab_overload_page) && (si->spec_addr < (slab_overload_page+SIZE(page)))) { si->found = KMEM_SLAB_OVERLOAD_PAGE; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } readmem(slab_overload_page + OFFSET(page_active), KVADDR, &tmp, sizeof(int), "active", FAULT_ON_ERROR); si->s_inuse = tmp; gather_slab_free_list_slab_overload_page(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } /* * Gather the free objects in a slab into the si->addrlist, checking for * specified addresses that are in-slab kmem_bufctls, and making error checks * along the way. Object address checks are deferred to dump_slab_objects(). 
*/ #define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) static void gather_slab_free_list(struct meminfo *si) { ulong *next, obj; ulong expected, cnt; BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); if (!si->s_freep) return; cnt = 0; expected = si->c_num - si->s_inuse; next = si->s_freep; do { if (cnt == si->c_num) { error(INFO, "\"%s\" cache: too many objects found in slab free list\n", si->curname); si->errors++; return; } /* * Off-slab kmem_bufctls are contained in arrays of object * pointers that point to: * 1. next kmem_bufctl (or NULL) if the object is free. * 2. to the object if it the object is in use. * * On-slab kmem_bufctls resides just after the object itself, * and point to: * 1. next kmem_bufctl (or NULL) if object is free. * 2. the containing slab if the object is in use. */ if (si->c_flags & SLAB_CFLGS_BUFCTL) obj = si->s_mem + ((next - si->s_index) * si->c_offset); else obj = (ulong)next - si->c_offset; si->addrlist[cnt] = obj; if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB(next, si) && (si->spec_addr >= (ulong)next) && (si->spec_addr < (ulong)(next + 1))) { si->found = KMEM_BUFCTL_ADDR; return; } } cnt++; if (!INSLAB(obj, si)) { error(INFO, "\"%s\" cache: address not contained within slab: %lx\n", si->curname, obj); si->errors++; } readmem((ulong)next, KVADDR, &next, sizeof(void *), "s_freep chain entry", FAULT_ON_ERROR); } while (next); if (cnt != expected) { error(INFO, "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } /* * gather_slab_free_list() adapted for newer percpu slab format. 
*/

/* kmem_bufctl_t sentinel marking the end of a slab's free-object chain */
#define BUFCTL_END 0xffffFFFF

static void
gather_slab_free_list_percpu(struct meminfo *si)
{
	int i;
	ulong obj;
	ulong expected, cnt;
	int free_index;
	ulong kmembp;
	short *kbp;

	/* Start with a negated (empty) free-object address list. */
	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	if (CRASHDEBUG(1))
		fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n",
			si->slab, si->s_inuse, si->c_num);

	/* A fully-used slab has no free list to gather. */
	if (si->s_inuse == si->c_num )
		return;

	/* The kmem_bufctl_t array immediately follows the slab structure. */
	kmembp = si->slab + SIZE_OPTION(slab_s, slab);
	readmem((ulong)kmembp, KVADDR, si->kmem_bufctl,
		SIZE(kmem_bufctl_t) * si->c_num,
		"kmem_bufctl array", FAULT_ON_ERROR);

	if (CRASHDEBUG(1)) {
		/* dump the raw bufctl entries; int- or short-sized per kernel */
		for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) &&
		     (i < si->c_num); i++)
			fprintf(fp, "%d ", si->kmem_bufctl[i]);

		for (kbp = (short *)&si->kmem_bufctl[0], i = 0;
		     (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num);
		     i++)
			fprintf(fp, "%d ", *(kbp + i));

		fprintf(fp, "\n");
	}

	cnt = 0;
	expected = si->c_num - si->s_inuse;

	/*
	 * Walk the free-object index chain (terminated by BUFCTL_END),
	 * translating each index to an object address in addrlist.  The
	 * kmem_bufctl_t entries are ints or shorts depending upon the
	 * kernel version.
	 */
	if (SIZE(kmem_bufctl_t) == sizeof(int)) {
		for (free_index = si->free; free_index != BUFCTL_END;
		     free_index = si->kmem_bufctl[free_index]) {

			if (cnt == si->c_num) {
				error(INFO,
			"\"%s\" cache: too many objects found in slab free list\n",
					si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else if (SIZE(kmem_bufctl_t) == sizeof(short)) {
		kbp = (short *)&si->kmem_bufctl[0];

		for (free_index = si->free; free_index != BUFCTL_END;
		     free_index = (int)*(kbp + free_index)) {

			if (cnt == si->c_num) {
				error(INFO,
			"\"%s\" cache: too many objects found in slab free list\n",
					si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else
		error(FATAL,
		"size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n",
			SIZE(kmem_bufctl_t));

	/* cross-check the gathered count against c_num - s_inuse */
	if (cnt != expected) {
		error(INFO,
		"\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
			si->curname, expected, cnt);
		si->errors++;
	}
}

static void
gather_slab_free_list_slab_overload_page(struct meminfo *si)
{
	int i, active;
	ulong obj, objnr, cnt,
freelist;
	unsigned char *ucharptr;
	unsigned short *ushortptr;
	unsigned int *uintptr;

	if (CRASHDEBUG(1))
		fprintf(fp, "slab page: %lx active: %ld si->c_num: %ld\n",
			si->slab - OFFSET(page_lru), si->s_inuse, si->c_num);

	/* A fully-active slab page has no free objects to gather. */
	if (si->s_inuse == si->c_num )
		return;

	/* page.freelist locates the freelist index array for this slab page */
	readmem(si->slab - OFFSET(page_lru) + OFFSET(page_freelist),
		KVADDR, &freelist, sizeof(void *), "page freelist",
		FAULT_ON_ERROR);

	readmem(freelist, KVADDR, si->freelist,
		si->freelist_index_size * si->c_num,
		"freelist array", FAULT_ON_ERROR);

	/* Start with a negated (empty) free-object address list. */
	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	cnt = objnr = 0;
	ucharptr = NULL;
	ushortptr = NULL;
	uintptr = NULL;
	active = si->s_inuse;

	/* freelist entries may be 1, 2 or 4 bytes wide (freelist_idx_t) */
	switch (si->freelist_index_size)
	{
	case 1:
		ucharptr = (unsigned char *)si->freelist;
		break;
	case 2:
		ushortptr = (unsigned short *)si->freelist;
		break;
	case 4:
		uintptr = (unsigned int *)si->freelist;
		break;
	}

	/*
	 * The first "active" freelist entries index in-use objects; the
	 * remaining entries index free objects, which are collected into
	 * addrlist.
	 */
	for (i = 0; i < si->c_num; i++) {
		switch (si->freelist_index_size)
		{
		case 1:
			objnr = (ulong)*ucharptr++;
			break;
		case 2:
			objnr = (ulong)*ushortptr++;
			break;
		case 4:
			objnr = (ulong)*uintptr++;
			break;
		}

		if (objnr >= si->c_num) {
			error(INFO,
			    "\"%s\" cache: invalid/corrupt freelist entry: %ld\n",
				si->curname, objnr);
			si->errors++;
		}

		if (i >= active) {
			obj = si->s_mem + (objnr * si->size);
			si->addrlist[cnt++] = obj;

			if (CRASHDEBUG(1))
				fprintf(fp, "%ld ", objnr);
		} else if (CRASHDEBUG(1))
			fprintf(fp, "[%ld] ", objnr);
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "\n");
}

/*
 * Dump the FREE, [ALLOCATED] and objects of a slab.
*/ #define DUMP_SLAB_OBJECT() \ for (j = on_free_list = 0; j < si->c_num; j++) { \ if (obj == si->addrlist[j]) { \ on_free_list = TRUE; \ break; \ } \ } \ \ if (on_free_list) { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " %lx\n", obj); \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_FREE; \ si->container = obj; \ return; \ } \ } \ } else { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " [%lx]\n", obj); \ cnt++; \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_INUSE; \ si->container = obj; \ return; \ } \ } \ } static void dump_slab_objects(struct meminfo *si) { int i, j; ulong *next; int on_free_list; ulong cnt, expected; ulong bufctl, obj; gather_slab_free_list(si); if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, "si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); /* For on-slab bufctls, c_offset is the distance between the start of * an obj and its related bufctl. For off-slab bufctls, c_offset is * the distance between objs in the slab. */ if (si->c_flags & SLAB_CFLGS_BUFCTL) { for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { obj = si->s_mem + ((next - si->s_index) * si->c_offset); DUMP_SLAB_OBJECT(); } } else { /* * Get the "real" s_mem, i.e., without the offset stripped off. * It contains the address of the first object. 
*/ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), KVADDR, &obj, sizeof(ulong), "s_mem", FAULT_ON_ERROR); for (i = 0; i < si->c_num; i++) { DUMP_SLAB_OBJECT(); if (si->flags & ADDRESS_SPECIFIED) { bufctl = obj + si->c_offset; if ((si->spec_addr >= bufctl) && (si->spec_addr < (bufctl + SIZE(kmem_bufctl_t)))) { si->found = KMEM_BUFCTL_ADDR; return; } } obj += (si->c_offset + SIZE(kmem_bufctl_t)); } } if (cnt != expected) { error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } /* * dump_slab_objects() adapted for newer percpu slab format. */ static void dump_slab_objects_percpu(struct meminfo *si) { int i, j; int on_free_list, on_cpudata_list, on_shared_list; ulong cnt, expected; ulong obj, freelist; if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) if (!(si->found & KMEM_SLAB_OVERLOAD_PAGE)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, "si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { on_free_list = FALSE; on_cpudata_list = FALSE; on_shared_list = FALSE; for (j = 0; j < si->c_num; j++) { if (obj == si->addrlist[j]) { on_free_list = TRUE; break; } } on_cpudata_list = check_cpudata_list(si, obj); on_shared_list = check_shared_list(si, obj); if (on_free_list && on_cpudata_list) { error(INFO, "\"%s\" cache: object %lx on both free and cpu %d lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both free and shared lists\n", si->curname, obj); si->errors++; } if (on_cpudata_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " 
%lx\n", obj); if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_FREE; si->container = obj; return; } } } else if (on_cpudata_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (cpu %d cache)\n", obj, si->cpu); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_CACHED; si->container = obj; return; } } } else if (on_shared_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (shared cache)\n", obj); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_SHARED; si->container = obj; return; } } } else { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " [%lx]\n", obj); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_INUSE; si->container = obj; return; } } } } if (cnt != expected) { error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } if ((si->flags & ADDRESS_SPECIFIED) && (vt->flags & SLAB_OVERLOAD_PAGE)) { readmem(si->slab - OFFSET(page_lru) + OFFSET(page_freelist), KVADDR, &freelist, sizeof(ulong), "page.freelist", FAULT_ON_ERROR); if ((si->spec_addr >= freelist) && (si->spec_addr < si->s_mem)) si->found = KMEM_SLAB_FREELIST; } } /* * Determine how many of the "inuse" slab objects are actually cached * in the kmem_cache_s header. Set the per-slab count and update the * cumulative per-cache count. With the addition of the shared list * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat * misleading. But they both are types of objects that are cached * in the kmem_cache_s header, just not necessarily per-cpu. 
*/ static void gather_slab_cached_count(struct meminfo *si) { int i; ulong obj; int in_cpudata, in_shared; si->cpucached_slab = 0; for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { in_cpudata = in_shared = 0; if (check_cpudata_list(si, obj)) { in_cpudata = TRUE; si->cpucached_slab++; if (si->flags & SLAB_GET_COUNTS) { si->cpucached_cache++; } } if (check_shared_list(si, obj)) { in_shared = TRUE; if (!in_cpudata) { si->cpucached_slab++; if (si->flags & SLAB_GET_COUNTS) { si->cpucached_cache++; } } } if (in_cpudata && in_shared) { si->flags |= SLAB_DATA_NOSAVE; if (!(si->flags & VERBOSE)) error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n", si->curname, obj, si->cpu); } } } /* * Populate the percpu object list for a given slab. */ static void gather_cpudata_list_v1(struct meminfo *si) { int i, j; int avail; ulong cpudata[NR_CPUS]; if (INVALID_MEMBER(kmem_cache_s_cpudata)) return; readmem(si->cache+OFFSET(kmem_cache_s_cpudata), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), "cpudata array", FAULT_ON_ERROR); for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && cpudata[i]; i++) { BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); readmem(cpudata[i]+OFFSET(cpucache_s_avail), KVADDR, &avail, sizeof(int), "cpucache avail", FAULT_ON_ERROR); if (!avail) continue; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; } if (CRASHDEBUG(2)) fprintf(fp, "%s: cpu[%d] avail: %d\n", si->curname, i, avail); readmem(cpudata[i]+SIZE(cpucache_s), KVADDR, si->cpudata[i], sizeof(void *) * avail, "cpucache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx\n", si->cpudata[i][j]); } } /* * Updated for 2.6 slab percpu data structure, this also gathers * the shared array_cache list as well. 
 */
/*
 * Gather the per-cpu object cache pointers for a kmem_cache (slab v2),
 * filling si->cpudata[cpu][] and si->shared_array_cache[] for later
 * lookup by check_cpudata_list()/check_shared_list().
 */
static void
gather_cpudata_list_v2(struct meminfo *si)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared;

	/* Read the kmem_cache_s.array[] of per-cpu array_cache pointers. */
	readmem(si->cache+OFFSET(kmem_cache_s_array),
		KVADDR, &cpudata[0],
		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
		"array_cache array", FAULT_ON_ERROR);

	for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	     cpudata[i]; i++) {
		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);

		readmem(cpudata[i]+OFFSET(array_cache_avail),
			KVADDR, &avail, sizeof(int),
			"array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		/* Sanity-check against the per-cache limit; note an error
		 * but still attempt the (oversized) read below. */
		if (avail > vt->kmem_max_limit) {
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
				si->curname, avail, vt->kmem_max_limit);
			si->errors++;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
				si->curname, i, avail);

		/* The cached object pointers follow the array_cache header. */
		readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i],
			sizeof(void *) * avail, "array_cache avail",
			FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
					si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);

	if (!VALID_MEMBER(kmem_list3_shared) ||
	    !VALID_MEMBER(kmem_cache_s_lists) ||
	    !readmem(si->cache+OFFSET(kmem_cache_s_lists)+
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
	    !readmem(shared+OFFSET(array_cache_avail),
	    KVADDR, &avail, sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail)
		return;

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
			si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
			si->curname, avail);

	readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache,
		sizeof(void *) * avail, "shared array_cache avail",
		FAULT_ON_ERROR);

	if (CRASHDEBUG(2))
		for (j = 0; j < avail; j++)
			fprintf(fp, " %lx (shared list)\n",
				si->shared_array_cache[j]);
}

/*
 * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache
 */
static void
gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared, percpu_ptr;
	ulong *start_address;

	start_address = (ulong *)
	    GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);

	if (vt->flags & SLAB_CPU_CACHE) {
		/* Modern layout: one percpu pointer, offset per cpu. */
		readmem(si->cache+OFFSET(kmem_cache_cpu_cache), KVADDR,
			&percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache",
			FAULT_ON_ERROR);
		for (i = 0; i < vt->kmem_max_cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		readmem(si->cache+OFFSET(kmem_cache_s_array), KVADDR,
			&cpudata[0], sizeof(ulong) * vt->kmem_max_cpus,
			"array_cache array", FAULT_ON_ERROR);
	}

	/* Per-cpu caches are only gathered on the first node (index 0). */
	for (i = 0; (i < vt->kmem_max_cpus) && cpudata[i] && !(index); i++) {
		if (si->cpudata[i])
			BZERO(si->cpudata[i],
			      sizeof(ulong) * vt->kmem_max_limit);
		else
			continue;

		readmem(cpudata[i]+OFFSET(array_cache_avail),
			KVADDR, &avail, sizeof(int),
			"array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail > vt->kmem_max_limit) {
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
				si->curname, avail, vt->kmem_max_limit);
			si->errors++;
			continue;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
				si->curname, i, avail);

		readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i],
			sizeof(void *) * avail, "array_cache avail",
			FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
					si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	if (si->flags & SLAB_FIRST_NODE) {
		/* First node of this cache: reset the accumulated list. */
		BZERO(si->shared_array_cache, sizeof(ulong) *
			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->current_cache_index = 0;
	}

	if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes ,
	    "array nodelist array", RETURN_ON_ERROR) ||
	    !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR,
	    &shared, sizeof(void *), "kmem_list3 shared",
	    RETURN_ON_ERROR|QUIET) || !shared ||
	    !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail,
	    sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail) {
		FREEBUF(start_address);
		return;
	}

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
			si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
			si->curname, avail);

	/* Append this node's shared objects after those gathered so far. */
	readmem(shared+SIZE(array_cache), KVADDR,
		si->shared_array_cache + si->current_cache_index,
		sizeof(void *) * avail, "shared array_cache avail",
		FAULT_ON_ERROR);

	/* NOTE(review): this overflow check runs after the read above has
	 * already filled the buffer -- confirm upstream intent. */
	if ((si->current_cache_index + avail) >
	    (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) {
		error(INFO,
		    "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n",
			si->curname,
			si->current_cache_index + avail,
			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		for (j = si->current_cache_index;
		     j < (si->current_cache_index + avail); j++)
			fprintf(fp, " %lx (shared list)\n",
				si->shared_array_cache[j]);

	si->current_cache_index += avail;

	FREEBUF(start_address);
}

/*
 * Check whether a given address is contained in the previously-gathered
 * percpu object cache.
 */
static int
check_cpudata_list(struct meminfo *si, ulong obj)
{
	int i, j;

	for (i = 0; i < vt->kmem_max_cpus; i++) {
		/* cpudata[i][] is NULL-terminated by the BZERO in the
		 * gather pass. */
		for (j = 0; si->cpudata[i][j]; j++)
			if (si->cpudata[i][j] == obj) {
				si->cpu = i;
				return TRUE;
			}
	}

	return FALSE;
}

/*
 * Check whether a given address is contained in the previously-gathered
 * shared object cache.
 */
static int
check_shared_list(struct meminfo *si, ulong obj)
{
	int i;

	if (INVALID_MEMBER(kmem_list3_shared) ||
	    !si->shared_array_cache)
		return FALSE;

	for (i = 0; si->shared_array_cache[i]; i++) {
		if (si->shared_array_cache[i] == obj)
			return TRUE;
	}

	return FALSE;
}

/*
 * Search the various memory subsystems for instances of this address.
 * Start with the most specific areas, ending up with at least the
 * mem_map page data.
 */
static void
kmem_search(struct meminfo *mi)
{
	struct syment *sp;
	struct meminfo tmp_meminfo;
	char buf[BUFSIZE];
	ulong vaddr, orig_flags;
	physaddr_t paddr;
	ulong offset;
	ulong task;
	ulong show_flags;
	struct task_context *tc;

	vaddr = 0;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	pc->curcmd_flags |= IGNORE_ERRORS;

	switch (mi->memtype)
	{
	case KVADDR:
		vaddr = mi->spec_addr;
		break;

	case PHYSADDR:
		/* Only unity-mapped physical addresses have a usable
		 * virtual equivalent; otherwise mark it BADADDR. */
		vaddr = mi->spec_addr < VTOP(vt->high_memory) ?
			PTOV(mi->spec_addr) : BADADDR;
		break;
	}

	orig_flags = mi->flags;
	mi->retval = 0;

	/*
	 * Check first for a possible symbolic display of the virtual
	 * address associated with mi->spec_addr or PTOV(mi->spec_addr).
	 */
	if (((vaddr >= kt->stext) && (vaddr <= kt->end)) ||
	    IS_MODULE_VADDR(mi->spec_addr)) {
		if ((sp = value_search(vaddr, &offset))) {
			show_flags = SHOW_LINENUM | SHOW_RADIX();
			if (module_symbol(sp->value, NULL, NULL, NULL, 0))
				show_flags |= SHOW_MODULE;
			show_symbol(sp, offset, show_flags);
			fprintf(fp, "\n");
		}
	}

	/*
	 * Check for a valid mapped address.
	 */
	if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) {
		if (kvtop(NULL, mi->spec_addr, &paddr, 0)) {
			mi->flags = orig_flags | VMLIST_VERIFY;
			dump_vmlist(mi);
			if (mi->retval) {
				/* It is a vmalloc address: display it, then
				 * continue the search by physical address. */
				mi->flags = orig_flags;
				dump_vmlist(mi);
				fprintf(fp, "\n");
				mi->spec_addr = paddr;
				mi->memtype = PHYSADDR;
				goto mem_map;
			}
		}
	}
	/*
	 * If the address is physical, check whether it's in vmalloc space.
	 */
	if (mi->memtype == PHYSADDR) {
		mi->flags = orig_flags;
		mi->flags |= GET_PHYS_TO_VMALLOC;
		mi->retval = 0;
		dump_vmlist(mi);
		mi->flags &= ~GET_PHYS_TO_VMALLOC;

		if (mi->retval) {
			if ((sp = value_search(mi->retval, &offset))) {
				show_symbol(sp, offset,
					SHOW_LINENUM | SHOW_RADIX());
				fprintf(fp, "\n");
			}
			dump_vmlist(mi);
			fprintf(fp, "\n");
			goto mem_map;
		}
	}

	/*
	 * Check whether the containing page belongs to the slab subsystem.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	if ((vaddr != BADADDR) &&
	    vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}
	if ((vaddr != BADADDR) && is_slab_page(mi, buf)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}

	/*
	 * Check free list.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	vt->dump_free_pages(mi);
	if (mi->retval)
		fprintf(fp, "\n");

	if (vt->page_hash_table) {
		/*
		 * Check the page cache.
		 */
		mi->flags = orig_flags;
		mi->retval = 0;
		dump_page_hash_table(mi);
		if (mi->retval)
			fprintf(fp, "\n");
	}

	/*
	 * Check whether it's a current task or stack address.
	 */
	if ((mi->memtype == KVADDR) && (task = vaddr_in_task_struct(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	} else if ((mi->memtype == KVADDR) && (task = stkptr_to_task(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	}

mem_map:
	mi->flags = orig_flags;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	if (vaddr != BADADDR)
		dump_mem_map(mi);
	else
		mi->retval = FALSE;

	if (!mi->retval)
		fprintf(fp, "%llx: %s address not found in mem map\n",
			mi->spec_addr, memtype_string(mi->memtype, 0));
}

/*
 * Determine whether an address is a page pointer from the mem_map[] array.
 * If the caller requests it, return the associated physical address.
 */
int
is_page_ptr(ulong addr, physaddr_t *phys)
{
	int n;
	ulong ppstart, ppend;
	struct node_table *nt;
	ulong pgnum, node_size;
	ulong nr, sec_addr;
	ulong nr_mem_sections;
	ulong coded_mem_map, mem_map, end_mem_map;
	physaddr_t section_paddr;

	if (IS_SPARSEMEM()) {
		/* Walk every valid section; the address must land exactly
		 * on a page struct boundary within a section's mem_map. */
		nr_mem_sections = NR_MEM_SECTIONS();
		for (nr = 0; nr < nr_mem_sections ; nr++) {
			if ((sec_addr = valid_section_nr(nr))) {
				coded_mem_map =
				    section_mem_map_addr(sec_addr);
				mem_map = sparse_decode_mem_map(coded_mem_map,
				    nr);
				end_mem_map = mem_map +
				    (PAGES_PER_SECTION() * SIZE(page));

				if ((addr >= mem_map) &&
				    (addr < end_mem_map)) {
					if ((addr - mem_map) % SIZE(page))
						return FALSE;
					if (phys) {
						section_paddr = PTOB(
						    section_nr_to_pfn(nr));
						pgnum = (addr - mem_map) /
						    SIZE(page);
						*phys = section_paddr +
						    ((physaddr_t)pgnum *
						    PAGESIZE());
					}
					return TRUE;
				}
			}
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		ppstart = nt->mem_map;
		ppend = ppstart + (node_size * SIZE(page));

		if ((addr < ppstart) || (addr >= ppend))
			continue;

		/*
		 * We're in the mem_map range -- but it is a page pointer?
		 */
		if ((addr - ppstart) % SIZE(page))
			return FALSE;

		if (phys) {
			pgnum = (addr - nt->mem_map) / SIZE(page);
			*phys = ((physaddr_t)pgnum * PAGESIZE()) +
			    nt->start_paddr;
		}

		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	/* Dead legacy path, kept under PRE_NODES for reference. */
	ppstart = vt->mem_map;
	ppend = ppstart + (vt->total_pages * vt->page_struct_len);

	if ((addr < ppstart) || (addr >= ppend))
		return FALSE;

	if ((addr - ppstart) % vt->page_struct_len)
		return FALSE;

	return TRUE;
#endif
}

/*
 * Return the physical address associated with this page pointer.
 */
static int
page_to_phys(ulong pp, physaddr_t *phys)
{
	return(is_page_ptr(pp, phys));
}

/*
 * Return the page pointer associated with this physical address.
 */
int
phys_to_page(physaddr_t phys, ulong *pp)
{
	int n;
	ulong pgnum;
	struct node_table *nt;
	physaddr_t pstart, pend;
	ulong node_size;

	if (IS_SPARSEMEM()) {
		ulong map;
		map = pfn_to_map(phys >> PAGESHIFT());
		if (map) {
			*pp = map;
			return TRUE;
		}

		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		if ((phys < pstart) || (phys >= pend))
			continue;

		/*
		 * We're in the physical range -- calculate the page.
		 */
		pgnum = BTOP(phys - pstart);
		*pp = nt->mem_map + (pgnum * SIZE(page));

		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	/* Dead legacy path, kept under PRE_NODES for reference. */
	if (phys >= (vt->total_pages * PAGESIZE()))
		return FALSE;

	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
	*pp = vt->mem_map + (pgnum * vt->page_struct_len);

	return TRUE;
#endif
}

/*
 * Fill the caller's buffer with up to maxlen non-NULL bytes
 * starting from kvaddr, returning the number of consecutive
 * non-NULL bytes found.  If the buffer gets filled with
 * maxlen bytes without a NULL, then the caller is reponsible
 * for handling it.
 */
int
read_string(ulong kvaddr, char *buf, int maxlen)
{
	int i;

	BZERO(buf, maxlen);

	/* Best-effort read: a partial/failed read leaves zeroes, so the
	 * scan below still terminates correctly. */
	readmem(kvaddr, KVADDR, buf, maxlen,
	    "read_string characters", QUIET|RETURN_ON_ERROR);

	for (i = 0; i < maxlen; i++) {
		if (buf[i] == NULLCHAR) {
			BZERO(&buf[i], maxlen-i);
			break;
		}
	}

	return i;
}

/*
 * "help -v" output
 */
void
dump_vm_table(int verbose)
{
	int i;
	struct node_table *nt;
	int others;
	ulong *up;

	others = 0;
	fprintf(fp, " flags: %lx %s(",
		vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
	if (vt->flags & NODES)
		fprintf(fp, "%sNODES", others++ ? "|" : "");
	if (vt->flags & NODES_ONLINE)
		fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : "");
	if (vt->flags & ZONES)
		fprintf(fp, "%sZONES", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V1)
		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2)
		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
	if (vt->flags & COMMON_VADDR)
		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_INIT)
		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
	if (vt->flags & V_MEM_MAP)
		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_UNAVAIL)
		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
	if (vt->flags & DISCONTIGMEM)
		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
	if (vt->flags & FLATMEM)
		fprintf(fp, "%sFLATMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM)
		fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM_EX)
		fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_DELAY)
		fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");
	if (vt->flags & VM_STAT)
		fprintf(fp, "%sVM_STAT", others++ ? "|" : "");
	if (vt->flags & KMALLOC_SLUB)
		fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");
	if (vt->flags & KMALLOC_COMMON)
		fprintf(fp, "%sKMALLOC_COMMON", others++ ? "|" : "");
	if (vt->flags & SLAB_OVERLOAD_PAGE)
		fprintf(fp, "%sSLAB_OVERLOAD_PAGE", others++ ? "|" : "");
	if (vt->flags & SLAB_CPU_CACHE)
		fprintf(fp, "%sSLAB_CPU_CACHE", others++ ? "|" : "");
	if (vt->flags & USE_VMAP_AREA)
		fprintf(fp, "%sUSE_VMAP_AREA", others++ ? "|" : "");
	if (vt->flags & CONFIG_NUMA)
		fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");
	if (vt->flags & VM_EVENT)
		fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");
	if (vt->flags & PGCNT_ADJ)
		fprintf(fp, "%sPGCNT_ADJ", others++ ? "|" : "");
	if (vt->flags & PAGEFLAGS)
		fprintf(fp, "%sPAGEFLAGS", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V1)
		fprintf(fp, "%sSWAPINFO_V1", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V2)
		fprintf(fp, "%sSWAPINFO_V2", others++ ? "|" : "");
	if (vt->flags & NODELISTS_IS_PTR)
		fprintf(fp, "%sNODELISTS_IS_PTR", others++ ? "|" : "");
	if (vt->flags & VM_INIT)
		fprintf(fp, "%sVM_INIT", others++ ? "|" : "");

	fprintf(fp, ")\n");

	/* All-equal pgd pointers are collapsed into one "..." line. */
	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
		fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n",
			vt->kernel_pgd[0]);
	else {
		fprintf(fp, " kernel_pgd[NR_CPUS]: ");
		for (i = 0; i < NR_CPUS; i++) {
			if ((i % 4) == 0)
				fprintf(fp, "\n ");
			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
		}
		fprintf(fp, "\n");
	}
	fprintf(fp, " high_memory: %lx\n", vt->high_memory);
	fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start);
	fprintf(fp, " mem_map: %lx\n", vt->mem_map);
	fprintf(fp, " total_pages: %ld\n", vt->total_pages);
	fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr);
	fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages);
	fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages);
	fprintf(fp, " num_physpages: %ld\n", vt->num_physpages);
	fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table);
	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
	fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
	fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit);
	fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
	fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count);
	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
	fprintf(fp, "kmem_cache_len_nodes: %ld\n", vt->kmem_cache_len_nodes);
	fprintf(fp, " nr_bad_slab_caches: %d\n", vt->nr_bad_slab_caches);
	if (!vt->nr_bad_slab_caches)
		fprintf(fp, " bad_slab_caches: (unused)\n");
	else {
		for (i = 0; i < vt->nr_bad_slab_caches; i++) {
			fprintf(fp, " bad_slab_caches[%d]: %lx\n",
				i, vt->bad_slab_caches[i]);
		}
	}
	fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen);
	fprintf(fp, " numnodes: %d\n", vt->numnodes);
	fprintf(fp, " nr_zones: %d\n", vt->nr_zones);
	fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas);
	for (i = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];
		fprintf(fp, " node_table[%d]: \n", i);
		fprintf(fp, " id: %d\n", nt->node_id);
		fprintf(fp, " pgdat: %lx\n", nt->pgdat);
		fprintf(fp, " size: %ld\n", nt->size);
		fprintf(fp, " present: %ld\n", nt->present);
		fprintf(fp, " mem_map: %lx\n", nt->mem_map);
		fprintf(fp, " start_paddr: %llx\n", nt->start_paddr);
		fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr);
	}

	/* Identify which handler each function pointer was set to. */
	fprintf(fp, " dump_free_pages: ");
	if (vt->dump_free_pages == dump_free_pages)
		fprintf(fp, "dump_free_pages()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
		fprintf(fp, "dump_free_pages_zones_v1()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
		fprintf(fp, "dump_free_pages_zones_v2()\n");
	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
		fprintf(fp, "dump_multidimensional_free_pages()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);

	fprintf(fp, " dump_kmem_cache: ");
	if (vt->dump_kmem_cache == dump_kmem_cache)
		fprintf(fp, "dump_kmem_cache()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_slub)
		fprintf(fp, "dump_kmem_cache_slub()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
	fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data);
	if (verbose)
		dump_saved_slab_data();
	fprintf(fp, " cpu_slab_type: %d\n", vt->cpu_slab_type);
	fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles);
	fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read);
	fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
	fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec);
	fprintf(fp, " mem_section: %lx\n", (ulong)vt->mem_section);
	fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM);
	fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len);
	if (vt->node_online_map_len) {
		fprintf(fp, " node_online_map: ");
		up = (ulong *)vt->node_online_map;
		for (i = 0; i < vt->node_online_map_len; i++) {
			fprintf(fp, "%s%lx", i ? ", " : "[", *up);
			up++;
		}
		fprintf(fp, "]\n");
	} else {
		fprintf(fp, " node_online_map: (unused)\n");
	}
	fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items);
	fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ?
		"\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_stat_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]);
	fprintf(fp, " nr_vm_event_items: %d\n", vt->nr_vm_event_items);
	fprintf(fp, " vm_event_items: %s", (vt->flags & VM_EVENT) ?
		"\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_event_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]);
	fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved);
	fprintf(fp, " PG_slab: %ld (%lx)\n", vt->PG_slab,
		(ulong)1 << vt->PG_slab);
	fprintf(fp, " PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask);
	fprintf(fp, " nr_pageflags: %d\n", vt->nr_pageflags);
	fprintf(fp, " pageflags_data: %s\n",
		vt->nr_pageflags ? "" : "(not used)");
	for (i = 0; i < vt->nr_pageflags; i++) {
		fprintf(fp, " %s[%d] %08lx: %s\n",
			i < 10 ? " " : "", i,
			vt->pageflags_data[i].mask,
			vt->pageflags_data[i].name);
	}

	dump_vma_cache(VERBOSE);
}

/*
 * Calculate the amount of memory referenced in the kernel-specific "nodes".
*/ uint64_t total_node_memory() { int i; struct node_table *nt; uint64_t total; for (i = total = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; if (CRASHDEBUG(1)) { console("node_table[%d]: \n", i); console(" id: %d\n", nt->node_id); console(" pgdat: %lx\n", nt->pgdat); console(" size: %ld\n", nt->size); console(" present: %ld\n", nt->present); console(" mem_map: %lx\n", nt->mem_map); console(" start_paddr: %lx\n", nt->start_paddr); console(" start_mapnr: %ld\n", nt->start_mapnr); } if (nt->present) total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); else total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); } return total; } /* * Dump just the vm_area_struct cache table data so that it can be * called from above or for debug purposes. */ void dump_vma_cache(ulong verbose) { int i; ulong vhits; if (!verbose) goto show_hits; for (i = 0; i < VMA_CACHE; i++) fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", i, vt->cached_vma[i], vt->cached_vma_hits[i]); fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); fflush(fp); show_hits: if (vt->vma_cache_fills) { for (i = vhits = 0; i < VMA_CACHE; i++) vhits += vt->cached_vma_hits[i]; fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", verbose ? "" : " ", (vhits * 100)/vt->vma_cache_fills, vhits, vt->vma_cache_fills); } } /* * Guess at the "real" amount of physical memory installed, formatting * it in a MB or GB based string. 
 */
char *
get_memory_size(char *buf)
{
	uint64_t total;
	ulong next_gig;
#ifdef OLDWAY
	ulong mbs, gbs;
#endif

	total = machdep->memory_size();

	/* Round up to the next GB when within 64MB of it, on the theory
	 * that the missing remainder is firmware/holes, not real RAM. */
	if ((next_gig = roundup(total, GIGABYTES(1)))) {
		if ((next_gig - total) <= MEGABYTES(64))
			total = next_gig;
	}

	return (pages_to_size((ulong)(total/PAGESIZE()), buf));

#ifdef OLDWAY
	/* Unreachable legacy formatting, retained under OLDWAY. */
	gbs = (ulong)(total/GIGABYTES(1));
	mbs = (ulong)(total/MEGABYTES(1));
	if (gbs)
		mbs = (total % GIGABYTES(1))/MEGABYTES(1);

	if (total%MEGABYTES(1))
		mbs++;

	if (gbs)
		sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs);
	else
		sprintf(buf, "%ld MB", mbs);

	return buf;
#endif
}

/*
 * For use by architectures not having machine-specific manners for
 * best determining physical memory size.
 */
uint64_t
generic_memory_size(void)
{
	/* Cache the computed size in machdep->memsize. */
	if (machdep->memsize)
		return machdep->memsize;

	return (machdep->memsize = total_node_memory());
}

/*
 * Determine whether a virtual address is user or kernel or ambiguous.
 */
int
vaddr_type(ulong vaddr, struct task_context *tc)
{
	int memtype, found;

	if (!tc)
		tc = CURRENT_CONTEXT();
	memtype = found = 0;

	if (machdep->is_uvaddr(vaddr, tc)) {
		memtype |= UVADDR;
		found++;
	}

	if (machdep->is_kvaddr(vaddr)) {
		memtype |= KVADDR;
		found++;
	}

	/* Matching both (or neither) address space is AMBIGUOUS. */
	if (found == 1)
		return memtype;
	else
		return AMBIGUOUS;
}

/*
 * Determine the first valid user space address
 */
static int
address_space_start(struct task_context *tc, ulong *addr)
{
	ulong vma;
	char *vma_buf;

	if (!tc->mm_struct)
		return FALSE;

	/* fill_mm_struct() populates the global tt->mm_struct buffer
	 * read immediately below. */
	fill_mm_struct(tc->mm_struct);
	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
	if (!vma)
		return FALSE;
	vma_buf = fill_vma_cache(vma);
	*addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));

	return TRUE;
}

/*
 * Default kernel virtual address layout: a unity-map region from kvbase
 * up to vmalloc_start, then vmalloc space to the top of the address space.
 */
int
generic_get_kvaddr_ranges(struct vaddr_range *rp)
{
	int cnt;

	if (XEN_HYPER_MODE())
		return 0;

	cnt = 0;

	rp[cnt].type = KVADDR_UNITY_MAP;
	rp[cnt].start = machdep->kvbase;
	rp[cnt++].end = vt->vmalloc_start;

	rp[cnt].type = KVADDR_VMALLOC;
	rp[cnt].start = vt->vmalloc_start;
	rp[cnt++].end = (ulong)(-1);

	return cnt;
}

/*
 * Search for a given value between a starting
and ending address range, * applying an optional mask for "don't care" bits. As an alternative * to entering the starting address value, -k means "start of kernel address * space". For processors with ambiguous user/kernel address spaces, * -u or -k must be used (with or without -s) as a differentiator. */ void cmd_search(void) { int i, c, memtype, ranges, context, max; ulonglong start, end; ulong value, mask, len; ulong uvaddr_start, uvaddr_end; ulong kvaddr_start, kvaddr_end, range_end; int sflag, Kflag, Vflag, pflag, tflag; struct searchinfo searchinfo; struct syment *sp; struct node_table *nt; struct vaddr_range vaddr_ranges[MAX_KVADDR_RANGES]; struct vaddr_range *vrp; struct task_context *tc; #define vaddr_overflow(ADDR) (BITS32() && ((ADDR) > 0xffffffffULL)) #define uint_overflow(VALUE) ((VALUE) > 0xffffffffUL) #define ushort_overflow(VALUE) ((VALUE) > 0xffffUL) context = max = 0; start = end = 0; value = mask = sflag = pflag = Kflag = Vflag = memtype = len = tflag = 0; kvaddr_start = kvaddr_end = 0; uvaddr_start = UNINITIALIZED; uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; BZERO(&searchinfo, sizeof(struct searchinfo)); vrp = &vaddr_ranges[0]; ranges = machdep->get_kvaddr_ranges(vrp); if (CRASHDEBUG(1)) { fprintf(fp, "kvaddr ranges:\n"); for (i = 0; i < ranges; i++) { fprintf(fp, " [%d] %lx %lx ", i, vrp[i].start, vrp[i].end); switch (vrp[i].type) { case KVADDR_UNITY_MAP: fprintf(fp, "KVADDR_UNITY_MAP\n"); break; case KVADDR_START_MAP: fprintf(fp, "KVADDR_START_MAP\n"); break; case KVADDR_VMALLOC: fprintf(fp, "KVADDR_VMALLOC\n"); break; case KVADDR_MODULES: fprintf(fp, "KVADDR_MODULES\n"); break; case KVADDR_VMEMMAP: fprintf(fp, "KVADDR_VMEMMAP\n"); break; } } } searchinfo.mode = SEARCH_ULONG; /* default search */ while ((c = getopt(argcnt, args, "tl:ukKVps:e:v:m:hwcx:")) != EOF) { switch(c) { case 'u': if (XEN_HYPER_MODE()) error(FATAL, "-u option is not applicable to the " "Xen hypervisor\n"); if (is_kernel_thread(CURRENT_TASK()) || !task_mm(CURRENT_TASK(), TRUE)) error(FATAL, "current context has no user address space\n"); if (!sflag) { address_space_start(CURRENT_CONTEXT(), &uvaddr_start); start = (ulonglong)uvaddr_start; } memtype = UVADDR; sflag++; break; case 'p': if (XEN_HYPER_MODE()) error(FATAL, "-p option is not applicable to the " "Xen hypervisor\n"); memtype = PHYSADDR; if (!sflag) { nt = &vt->node_table[0]; start = nt->start_paddr; } sflag++; break; case 'V': case 'K': case 'k': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (!sflag) start = vrp[0].start; memtype = KVADDR; sflag++; if (c == 'K') Kflag++; else if (c == 'V') Vflag++; break; case 's': if ((sp = symbol_search(optarg))) start = (ulonglong)sp->value; else start = htoll(optarg, FAULT_ON_ERROR, NULL); sflag++; break; case 'e': if ((sp = symbol_search(optarg))) end = (ulonglong)sp->value; else end = htoll(optarg, FAULT_ON_ERROR, NULL); if (!end) error(FATAL, "invalid ending address: 0\n"); break; case 'l': len = stol(optarg, FAULT_ON_ERROR, NULL); break; case 'm': mask = 
htol(optarg, FAULT_ON_ERROR, NULL); break; case 'h': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"h\"\n"); searchinfo.mode = SEARCH_USHORT; break; case 'w': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"w\"\n"); searchinfo.mode = SEARCH_UINT; break; case 'c': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search type with \"c\"\n"); searchinfo.mode = SEARCH_CHARS; break; case 'x': context = dtoi(optarg, FAULT_ON_ERROR, NULL); break; case 't': if (XEN_HYPER_MODE()) error(FATAL, "-t option is not applicable to the " "Xen hypervisor\n"); tflag++; break; default: argerrs++; break; } } if (tflag && (memtype || start || end || len)) error(FATAL, "-t option cannot be used with other " "memory-selection options\n"); if (XEN_HYPER_MODE()) { memtype = KVADDR; if (!sflag) error(FATAL, "the \"-s start\" option is required for" " the Xen hypervisor\n"); } else if (!memtype) { memtype = KVADDR; if (!tflag && !sflag++) start = vrp[0].start; } if (argerrs || (!sflag && !tflag) || !args[optind] || (len && end) || !memtype) cmd_usage(pc->curcmd, SYNOPSIS); searchinfo.memtype = memtype; /* * Verify starting address. */ switch (memtype) { case UVADDR: if (vaddr_overflow(start) || !IS_UVADDR((ulong)start, CURRENT_CONTEXT())) { error(INFO, "invalid user virtual address: %llx\n", start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; if (vaddr_overflow(start) || !IS_KVADDR((ulong)start)) { error(INFO, "invalid kernel virtual address: %llx\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case AMBIGUOUS: error(INFO, "ambiguous virtual address: %llx (requires -u or -k)\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } /* * Set up ending address if necessary. 
*/ if (!end && !len && !tflag) { switch (memtype) { case UVADDR: end = (ulonglong)uvaddr_end; break; case KVADDR: if (XEN_HYPER_MODE()) end = (ulong)(-1); else { range_end = 0; for (i = 0; i < ranges; i++) { if (vrp[i].end > range_end) range_end = vrp[i].end; } end = (ulonglong)range_end; } break; case PHYSADDR: nt = &vt->node_table[vt->numnodes-1]; end = nt->start_paddr + (nt->size * PAGESIZE()); break; } } else if (len) end = start + len; /* * Final verification and per-type start/end variable setting. */ switch (memtype) { case UVADDR: uvaddr_start = (ulong)start; if (end > (ulonglong)uvaddr_end) { error(INFO, "ending address %lx is in kernel space: %llx\n", end); cmd_usage(pc->curcmd, SYNOPSIS); } if (end < (ulonglong)uvaddr_end) uvaddr_end = (ulong)end; if (uvaddr_end < uvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", uvaddr_end, uvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; kvaddr_start = (ulong)start; kvaddr_end = (ulong)end; if (kvaddr_end < kvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", kvaddr_end, kvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case PHYSADDR: if (end < start) { error(INFO, "ending address %llx is below starting address %llx\n", (ulonglong)end, (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; } if (mask) { switch (searchinfo.mode) { case SEARCH_ULONG: searchinfo.s_parms.s_ulong.mask = mask; break; case SEARCH_UINT: searchinfo.s_parms.s_uint.mask = mask; break; case SEARCH_USHORT: searchinfo.s_parms.s_ushort.mask = mask; break; case SEARCH_CHARS: error(INFO, "mask ignored on string search\n"); break; } } if (context) { switch (searchinfo.mode) { case SEARCH_ULONG: max = PAGESIZE()/sizeof(long); break; case SEARCH_UINT: max = PAGESIZE()/sizeof(int); break; case SEARCH_USHORT: max = PAGESIZE()/sizeof(short); break; case SEARCH_CHARS: error(FATAL, "-x option is not allowed with -c\n"); break; } if (context > max) 
error(FATAL, "context value %d is too large: maximum is %d\n", context, max); searchinfo.context = context; } searchinfo.vcnt = 0; searchinfo.val = UNUSED; while (args[optind]) { switch (searchinfo.mode) { case SEARCH_ULONG: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.value[searchinfo.vcnt] = value; searchinfo.vcnt++; break; case SEARCH_UINT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.value[searchinfo.vcnt] = value; if (uint_overflow(value)) error(FATAL, "value too large for -w option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_USHORT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? 
NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.value[searchinfo.vcnt] = value; if (ushort_overflow(value)) error(FATAL, "value too large for -h option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_CHARS: /* parser can deliver empty strings */ if (strlen(args[optind])) { searchinfo.s_parms.s_chars.value[searchinfo.vcnt] = args[optind]; searchinfo.s_parms.s_chars.len[searchinfo.vcnt] = strlen(args[optind]); searchinfo.vcnt++; } break; } optind++; } if (!searchinfo.vcnt) cmd_usage(pc->curcmd, SYNOPSIS); switch (memtype) { case PHYSADDR: searchinfo.paddr_start = start; searchinfo.paddr_end = end; search_physical(&searchinfo); break; case UVADDR: searchinfo.vaddr_start = uvaddr_start; searchinfo.vaddr_end = uvaddr_end; search_virtual(&searchinfo); break; case KVADDR: if (XEN_HYPER_MODE()) { searchinfo.vaddr_start = kvaddr_start; searchinfo.vaddr_end = kvaddr_end; search_virtual(&searchinfo); break; } if (tflag) { searchinfo.tasks_found = 0; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { searchinfo.vaddr_start = GET_STACKBASE(tc->task); searchinfo.vaddr_end = GET_STACKTOP(tc->task); searchinfo.task_context = tc; searchinfo.do_task_header = TRUE; search_virtual(&searchinfo); } break; } for (i = 0; i < ranges; i++) { if ((kvaddr_start >= vrp[i].end) || (kvaddr_end <= vrp[i].start)) continue; switch (vrp[i].type) { case KVADDR_UNITY_MAP: case KVADDR_START_MAP: if (Vflag) continue; break; case KVADDR_VMALLOC: case KVADDR_MODULES: case KVADDR_VMEMMAP: if (Kflag) continue; break; } pc->curcmd_private = vrp[i].type; searchinfo.vaddr_start = kvaddr_start > vrp[i].start ? kvaddr_start : vrp[i].start; searchinfo.vaddr_end = (kvaddr_end < vrp[i].end) ? kvaddr_end : vrp[i].end; search_virtual(&searchinfo); } break; } } /* * Do the work for cmd_search(). 
*/ static char * show_opt_string(struct searchinfo *si) { char *opt_string; int index; index = (si->val == UNUSED) ? si->vcnt : si->val; switch (si->mode) { case SEARCH_USHORT: opt_string = si->s_parms.s_ushort.opt_string[index]; break; case SEARCH_UINT: opt_string = si->s_parms.s_uint.opt_string[index]; break; case SEARCH_ULONG: default: opt_string = si->s_parms.s_ulong.opt_string[index]; break; } if (!opt_string) return ""; else if (FIRSTCHAR(opt_string) == '(') return opt_string; else { sprintf(si->buf, "(%s)", opt_string); return si->buf; } } #define SEARCHMASK(X) ((X) | mask) static void display_with_pre_and_post(void *bufptr, ulonglong addr, struct searchinfo *si) { int ctx, memtype, t, amount; ulonglong addr_d; ulong flag; char buf[BUFSIZE]; ctx = si->context; memtype = si->memtype; flag = HEXADECIMAL|NO_ERROR|ASCII_ENDLINE; switch (si->mode) { case SEARCH_USHORT: t = sizeof(ushort); break; case SEARCH_UINT: t = sizeof(uint); break; case SEARCH_ULONG: default: t = sizeof(ulong); break; } switch (t) { case 8: flag |= DISPLAY_64; break; case 4: flag |= DISPLAY_32; break; case 2: flag |= DISPLAY_16; break; } amount = ctx * t; addr_d = addr - amount < 0 ? 
0 : addr - amount; display_memory(addr_d, ctx, flag, memtype, NULL); BZERO(buf, BUFSIZE); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); switch(si->mode) { case SEARCH_ULONG: fprintf(fp, "%lx %s\n", *((ulong *)bufptr), show_opt_string(si)); break; case SEARCH_UINT: fprintf(fp, "%x %s\n", *((uint *)bufptr), show_opt_string(si)); break; case SEARCH_USHORT: fprintf(fp, "%x %s\n", *((ushort *)bufptr), show_opt_string(si)); break; } addr_d = addr + t; display_memory(addr_d, ctx, flag, memtype, NULL); fprintf(fp, "\n"); } static ulong search_ulong(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%lx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ulong_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%llx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } static ulong search_uint(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for 
(si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_uint_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } static ulong search_ushort(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ushort_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; 
i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* * String search "memory" to remember possible matches that cross * page (or search buffer) boundaries. * The cross_match zone is the last strlen-1 chars of the page for * each of the possible targets. */ struct cross_match { int cnt; /* possible hits in the cross_match zone */ ulong addr; /* starting addr of crossing match zone for this target */ ulonglong addr_p; /* for physical search */ char hit[BUFSIZE]; /* array of hit locations in the crossing match zone */ /* This should really be the much-smaller MAXARGLEN, but * no one seems to be enforcing that in the parser. */ } cross[MAXARGS]; ulong cross_match_next_addr; /* the expected starting value of the next page */ ulonglong cross_match_next_addr_p; /* the expected starting value of the next physical page */ #define CHARS_CTX 56 static void report_match(struct searchinfo *si, ulong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } fprintf(fp, "%lx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulong search_chars(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr = (ulong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr == addr) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match(si, cross[j].addr + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr = addr + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr = addr + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match(si, addr, ptr, slen, (char *)0, 0); } } } } return addr; } static void report_match_p(ulonglong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; fprintf(fp, "%llx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulonglong search_chars_p(ulong *bufptr, ulonglong addr_p, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr_p = (ulonglong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr_p == addr_p) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match_p(cross[j].addr_p + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr_p = addr_p + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr_p = addr_p + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr_p++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match_p(addr_p, ptr, slen, (char *)0, 0); } } } } return addr_p; } static void search_virtual(struct searchinfo *si) { ulong start, end; ulong pp, next, *ubp; int wordcnt, lastpage; ulong page; physaddr_t paddr; char *pagebuf; ulong pct, pages_read, pages_checked; time_t begin, finish; start = si->vaddr_start; end = si->vaddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start & (sizeof(long)-1)) { start &= ~(sizeof(long)-1); error(INFO, "rounding down start address to: %lx\n", start); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_virtual: start: %lx end: %lx\n", start, end); } next = start; for (pp = 
VIRTPAGEBASE(start); next < end; next = pp) { pages_checked++; lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); /* * Keep it virtual for Xen hypervisor. */ if (XEN_HYPER_MODE()) { if (!readmem(pp, KVADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(1)) fprintf(fp, "search suspended at: %lx\n", pp); goto done; } goto virtual; } switch (si->memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) goto done; continue; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_kpage(pp, &pp)) goto done; continue; } break; } if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { pp += PAGESIZE(); continue; } virtual: pages_read++; ubp = (ulong *)&pagebuf[next - pp]; if (lastpage) { if (end == (ulong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end - next)/sizeof(long); } else wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: next = search_ulong(ubp, next, wordcnt, si); break; case SEARCH_UINT: next = search_uint(ubp, next, wordcnt, si); break; case SEARCH_USHORT: next = search_ushort(ubp, next, wordcnt, si); break; case SEARCH_CHARS: next = search_chars(ubp, next, wordcnt, si); break; default: /* unimplemented search type */ next += wordcnt * (sizeof(long)); break; } if (CRASHDEBUG(1)) if ((pp % (1024*1024)) == 0) console("%lx\n", pp); pp += PAGESIZE(); } done: if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_virtual: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } static void search_physical(struct searchinfo *si) { ulonglong start_in, end_in; ulong *ubp; int wordcnt, lastpage; ulonglong pnext, ppp; char *pagebuf; ulong pct, 
pages_read, pages_checked; time_t begin, finish; ulong page; start_in = si->paddr_start; end_in = si->paddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start_in & (sizeof(ulonglong)-1)) { start_in &= ~(sizeof(ulonglong)-1); error(INFO, "rounding down start address to: %llx\n", (ulonglong)start_in); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_physical: start: %llx end: %llx\n", start_in, end_in); } pnext = start_in; for (ppp = PHYSPAGEBASE(start_in); pnext < end_in; pnext = ppp) { pages_checked++; lastpage = (PHYSPAGEBASE(pnext) == PHYSPAGEBASE(end_in)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); if (!phys_to_page(ppp, &page) || !readmem(ppp, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (!next_physpage(ppp, &ppp)) break; continue; } pages_read++; ubp = (ulong *)&pagebuf[pnext - ppp]; if (lastpage) { if (end_in == (ulonglong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end_in - pnext)/sizeof(long); } else wordcnt = (PAGESIZE() - (pnext - ppp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: pnext = search_ulong_p(ubp, pnext, wordcnt, si); break; case SEARCH_UINT: pnext = search_uint_p(ubp, pnext, wordcnt, si); break; case SEARCH_USHORT: pnext = search_ushort_p(ubp, pnext, wordcnt, si); break; case SEARCH_CHARS: pnext = search_chars_p(ubp, pnext, wordcnt, si); break; default: /* unimplemented search type */ pnext += wordcnt * (sizeof(long)); break; } ppp += PAGESIZE(); } if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_physical: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } /* * Return the next mapped user virtual address page that comes after * the passed-in address. 
 */
/*
 *  Return the next user virtual address page of the given task that
 *  comes strictly after the page containing the passed-in address.
 *  Walks the task's vm_area_struct list; returns TRUE with *nextvaddr
 *  set, or FALSE when no further mapped page exists.
 */
static int
next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr)
{
	ulong vma, total_vm;
	char *vma_buf;
	ulong vm_start, vm_end;
	ulong vm_next;

	/* Kernel threads have no user address space. */
	if (!tc->mm_struct)
		return FALSE;

	/*
	 *  fill_mm_struct() stashes the task's mm_struct in the
	 *  task-table cache; the mmap list head and total_vm are
	 *  then read from tt->mm_struct.
	 */
	fill_mm_struct(tc->mm_struct);
	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
	total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));

	if (!vma || (total_vm == 0))
		return FALSE;

	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */

	for ( ; vma; vma = vm_next) {
		vma_buf = fill_vma_cache(vma);

		vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
		vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
		vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));

		/* Candidate page lies below this VMA: snap up to its start. */
		if (vaddr <= vm_start) {
			*nextvaddr = vm_start;
			return TRUE;
		}

		/* Candidate page lies inside this VMA: use it as-is. */
		if ((vaddr > vm_start) && (vaddr < vm_end)) {
			*nextvaddr = vaddr;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 *  Return the next mapped kernel virtual address in the vmlist
 *  that is equal to or comes after the passed-in address.
 *  Prevent repeated calls to dump_vmlist() by only doing it
 *  one time for dumpfiles, or one time per (active) command.
 */
static int
next_vmlist_vaddr(ulong vaddr, ulong *nextvaddr)
{
	int i, retval;
	ulong cnt;
	struct meminfo meminfo, *mi;
	static int count = 0;              /* entries in the stashed copy */
	static struct vmlist *vmlist = NULL;  /* stashed vmlist array */
	static ulong cmdgencur = BADVAL;   /* command generation of the stash */

	/*
	 *  Search the stashed vmlist if possible.  On a live system the
	 *  stash is invalidated whenever a new command generation starts,
	 *  since the kernel's vmalloc map may have changed.
	 */
	if (vmlist && ACTIVE()) {
		if (pc->cmdgencur != cmdgencur) {
			free(vmlist);
			vmlist = NULL;
		}
	}

	if (vmlist) {
		for (i = 0, retval = FALSE; i < count; i++) {
			if (vaddr <= vmlist[i].addr) {
				/* below this segment: round up to its base */
				*nextvaddr = vmlist[i].addr;
				retval = TRUE;
				break;
			}
			if (vaddr < (vmlist[i].addr + vmlist[i].size)) {
				/* inside this segment: address is usable */
				*nextvaddr = vaddr;
				retval = TRUE;
				break;
			}
		}
		return retval;
	}

	/*
	 *  No stash: query dump_vmlist() twice -- once for the segment
	 *  count, once for the segment data.
	 */
	mi = &meminfo;
	BZERO(mi, sizeof(struct meminfo));
	mi->flags = GET_VMLIST_COUNT;
	dump_vmlist(mi);
	cnt = mi->retval;
	if (!cnt)
		return FALSE;

	mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*cnt);
	mi->flags = GET_VMLIST;
	dump_vmlist(mi);

	for (i = 0, retval = FALSE; i < cnt; i++) {
		if (vaddr <= mi->vmlist[i].addr) {
			*nextvaddr = mi->vmlist[i].addr;
			retval = TRUE;
			break;
		}
		if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) {
			*nextvaddr = vaddr;
			retval = TRUE;
			break;
		}
	}

	/*
	 *  Stash a malloc'd copy for subsequent calls; a malloc failure
	 *  is tolerated (the next call simply re-queries dump_vmlist()).
	 */
	if (!vmlist) {
		vmlist = (struct vmlist *)
		    malloc(sizeof(struct vmlist)*cnt);
		if (vmlist) {
			BCOPY(mi->vmlist, vmlist, sizeof(struct vmlist)*cnt);
			count = cnt;
			cmdgencur = pc->cmdgencur;
		}
	}

	FREEBUF(mi->vmlist);
	return retval;
}

/*
 *  Determine whether a virtual address is inside a vmlist segment.
 */
int
in_vmlist_segment(ulong vaddr)
{
	ulong next;

	/* only an exact "next" match means vaddr itself is mapped */
	if (next_vmlist_vaddr(vaddr, &next) &&
	    (vaddr == next))
		return TRUE;

	return FALSE;
}

/*
 *  Return the next kernel module virtual address that is
 *  equal to or comes after the passed-in address.
 */
static int
next_module_vaddr(ulong vaddr, ulong *nextvaddr)
{
	int i;
	ulong start, end;
	struct load_module *lm;

	/* assumes st->load_modules is sorted by ascending mod_base --
	   TODO(review): confirm; the early return relies on it */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		start = lm->mod_base;
		end = lm->mod_base + lm->mod_size;

		if (vaddr >= end)
			continue;

		/*
		 *  Either below or in this module.
		 */
		if (vaddr < start)
			*nextvaddr = start;
		else
			*nextvaddr = vaddr;

		return TRUE;
	}

	return FALSE;
}

/*
 *  Return the next kernel virtual address page in a designated
 *  kernel virtual address range that comes after the passed-in,
 *  untranslatable, address.
 */
static int
next_kpage(ulong vaddr, ulong *nextvaddr)
{
	ulong vaddr_orig;

	vaddr_orig = vaddr;
	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */

	if (vaddr < vaddr_orig)  /* wrapped back to zero? */
		return FALSE;

	/*
	 *  pc->curcmd_private carries the KVADDR_* region type set up
	 *  by cmd_search() for the range currently being scanned.
	 */
	switch (pc->curcmd_private)
	{
	case KVADDR_UNITY_MAP:
		return next_identity_mapping(vaddr, nextvaddr);
	case KVADDR_VMALLOC:
		return next_vmlist_vaddr(vaddr, nextvaddr);
	case KVADDR_VMEMMAP:
		/* no finer-grained map available: just try the next page */
		*nextvaddr = vaddr;
		return TRUE;
	case KVADDR_START_MAP:
		*nextvaddr = vaddr;
		return TRUE;
	case KVADDR_MODULES:
		return next_module_vaddr(vaddr, nextvaddr);
	}

	return FALSE;
}

/*
 *  Return the next physical address page that comes after
 *  the passed-in, unreadable, address.
 */
static int
next_physpage(ulonglong paddr, ulonglong *nextpaddr)
{
	int n;
	ulonglong node_start;
	ulonglong node_end;
	struct node_table *nt;

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		node_start = nt->start_paddr;
		node_end = nt->start_paddr + (nt->size * PAGESIZE());

		if (paddr >= node_end)
			continue;

		/* below this node: skip forward to its first page */
		if (paddr < node_start) {
			*nextpaddr = node_start;
			return TRUE;
		}

		/* inside this node (always true here, given the checks
		   above): advance one page */
		if (paddr < node_end) {
			*nextpaddr = paddr + PAGESIZE();
			return TRUE;
		}
	}

	return FALSE;
}

/*
 *  Sum the total number of base pages consumed by huge pages.
 *  Newer kernels expose an hstates[] array (one entry per huge page
 *  size); older kernels have a single nr_huge_pages counter.
 *  Returns FALSE only when hstates[] exists but its layout is unknown.
 */
static int
get_hugetlb_total_pages(ulong *nr_total_pages)
{
	ulong hstate_p, vaddr;
	int i, len;
	ulong nr_huge_pages;
	uint horder;

	*nr_total_pages = 0;

	if (kernel_symbol_exists("hstates")) {
		if (INVALID_SIZE(hstate) ||
		    INVALID_MEMBER(hstate_order) ||
		    INVALID_MEMBER(hstate_nr_huge_pages))
			return FALSE;

		len = get_array_length("hstates", NULL, 0);
		hstate_p = symbol_value("hstates");

		for (i = 0; i < len; i++) {
			vaddr = hstate_p + (SIZE(hstate) * i);

			readmem(vaddr + OFFSET(hstate_order), KVADDR,
			    &horder, sizeof(uint), "hstate_order",
			    FAULT_ON_ERROR);

			/* order 0 marks an unused hstate slot */
			if (!horder)
				continue;

			readmem(vaddr + OFFSET(hstate_nr_huge_pages),
			    KVADDR, &nr_huge_pages, sizeof(ulong),
			    "hstate_nr_huge_pages", FAULT_ON_ERROR);

			/* each huge page spans 2^order base pages */
			*nr_total_pages += nr_huge_pages * (1 << horder);
		}
	} else if (kernel_symbol_exists("nr_huge_pages")) {
		/* legacy kernels: fixed huge page size -- 4MB on
		   non-PAE x86, otherwise 2MB */
		unsigned long hpage_shift = 21;

		if ((machine_type("X86") && !(machdep->flags & PAE)))
			hpage_shift = 22;
		get_symbol_data("nr_huge_pages",
		    sizeof(ulong), &nr_huge_pages);
		*nr_total_pages = nr_huge_pages *
		    ((1 << hpage_shift) / machdep->pagesize);
	}

	return TRUE;
}

/*
 *  Display swap statistics.
 */
void
cmd_swap(void)
{
	int c;

	/* "swap" accepts no options */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	dump_swap_info(VERBOSE, NULL, NULL);
}

/*
 *  Do the work for cmd_swap().
 */

#define SWP_USED        1       /* swap_info_struct.flags: slot in use */
#define SWAP_MAP_BAD    0x8000  /* swap_map entry marking a bad page */

char *swap_info_hdr = \
"SWAP_INFO_STRUCT TYPE SIZE USED PCT PRI FILENAME\n";

/*
 *  Walk the kernel's swap_info[] (an array of structs in V1 kernels,
 *  an array of pointers in V2 kernels) and either display each active
 *  swap area (swapflags & VERBOSE) and/or return the total/used page
 *  counts through the optional out-parameters.
 *
 *  NOTE: on a swap_map read failure with (swapflags & RETURN_ON_ERROR),
 *  the out-parameters are repurposed to return the failing swap_map
 *  address and the swap_info index, and FALSE is returned.
 */
static int
dump_swap_info(ulong swapflags, ulong *totalswap_pages,
	       ulong *totalused_pages)
{
	int i, j;
	int swap_device, prio;
	ulong pages, usedswap;
	ulong flags, swap_file, max, swap_map, pct;
	ulong vfsmnt;
	ulong swap_info, swap_info_ptr;
	ushort *smap;
	ulong inuse_pages, totalswap, totalused;
	char *devname;
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];

	if (!symbol_exists("nr_swapfiles"))
		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");

	if (!symbol_exists("swap_info"))
		error(FATAL, "swap_info doesn't exist in this kernel!\n");

	swap_info_init();

	swap_info = symbol_value("swap_info");

	if (swapflags & VERBOSE)
		fprintf(fp, "%s", swap_info_hdr);

	totalswap = totalused = 0;

	/* stride by struct size (V1) or pointer size (V2) */
	for (i = 0; i < vt->nr_swapfiles; i++,
	    swap_info += (vt->flags & SWAPINFO_V1 ?
	    SIZE(swap_info_struct) : sizeof(void *))) {

		if (vt->flags & SWAPINFO_V2) {
			if (!readmem(swap_info, KVADDR, &swap_info_ptr,
			    sizeof(void *), "swap_info pointer",
			    QUIET|RETURN_ON_ERROR))
				continue;
			if (!swap_info_ptr)
				continue;
			fill_swap_info(swap_info_ptr);
		} else
			fill_swap_info(swap_info);

		/* member widths vary by kernel version -- probe each */
		if (MEMBER_SIZE("swap_info_struct", "flags") == sizeof(uint))
			flags = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));
		else
			flags = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));

		if (!(flags & SWP_USED))
			continue;

		swap_file = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_file));

		/* old kernels: swap_device member; newer: the presence
		   of old_block_size stands in as the "device" flag */
		swap_device = INT(vt->swap_info_struct +
			OFFSET_OPTION(swap_info_struct_swap_device,
			swap_info_struct_old_block_size));

		pages = INT(vt->swap_info_struct +
			OFFSET(swap_info_struct_pages));
		totalswap += pages;
		pages <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		inuse_pages = 0;

		if (MEMBER_SIZE("swap_info_struct", "prio") == sizeof(short))
			prio = SHORT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));
		else
			prio = INT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));

		if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int))
			max = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));
		else
			max = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));

		if (VALID_MEMBER(swap_info_struct_inuse_pages)) {
			if (MEMBER_SIZE("swap_info_struct", "inuse_pages") ==
			    sizeof(int))
				inuse_pages = UINT(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
			else
				inuse_pages = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
		}

		swap_map = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_map));

		/* resolve the swap area's pathname */
		if (swap_file) {
			if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
				vfsmnt = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_swap_vfsmnt));
				get_pathname(swap_file, buf, BUFSIZE,
					1, vfsmnt);
			} else if (VALID_MEMBER
				   (swap_info_struct_old_block_size)) {
				devname = vfsmount_devname(
					file_to_vfsmnt(swap_file),
					buf1, BUFSIZE);
				get_pathname(file_to_dentry(swap_file),
					buf, BUFSIZE, 1,
					file_to_vfsmnt(swap_file));
				/* devtmpfs/udev mounts yield paths lacking
				   the "/dev" prefix -- restore it */
				if ((STREQ(devname, "devtmpfs") ||
				     STREQ(devname, "udev")) &&
				    !STRNEQ(buf, "/dev/"))
					string_insert("/dev", buf);
			} else {
				get_pathname(swap_file, buf, BUFSIZE, 1, 0);
			}
		} else
			sprintf(buf, "(unknown)");

		/* V1: count used slots from the swap_map array itself */
		smap = NULL;
		if (vt->flags & SWAPINFO_V1) {
			smap = (ushort *)GETBUF(sizeof(ushort) * max);

			if (!readmem(swap_map, KVADDR, smap,
			    sizeof(ushort) * max,
			    "swap_info swap_map data",
			    RETURN_ON_ERROR|QUIET)) {
				if (swapflags & RETURN_ON_ERROR) {
					/* out-params repurposed: see the
					   function header comment */
					*totalswap_pages = swap_map;
					*totalused_pages = i;
					FREEBUF(smap);
					return FALSE;
				} else
					error(FATAL,
			"swap_info[%d].swap_map at %lx is inaccessible\n",
						i, swap_map);
			}
		}

		usedswap = 0;
		if (smap) {
			for (j = 0; j < max; j++) {
				switch (smap[j])
				{
				case SWAP_MAP_BAD:
				case 0:
					continue;
				default:
					usedswap++;
				}
			}
			FREEBUF(smap);
		} else
			/* V2: the kernel maintains the count for us */
			usedswap = inuse_pages;

		totalused += usedswap;
		usedswap <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		/* NOTE(review): divides by "pages" -- a zero-sized active
		   swap area would fault here; presumed impossible */
		pct = (usedswap * 100)/pages;

		if (swapflags & VERBOSE) {
			sprintf(buf1, "%lx", (vt->flags & SWAPINFO_V2) ?
				swap_info_ptr : swap_info);
			sprintf(buf2, "%ldk", pages);
			sprintf(buf3, "%ldk", usedswap);
			sprintf(buf4, "%2ld%%", pct);
			sprintf(buf5, "%d", prio);
			fprintf(fp, "%s %s %s %s %s %s %s\n",
				mkstring(buf1, MAX(VADDR_PRLEN,
				strlen("SWAP_INFO_STRUCT")),
				CENTER|LJUST, NULL),
				swap_device ? "PARTITION" : " FILE ",
				mkstring(buf2, 10, CENTER|RJUST, NULL),
				mkstring(buf3, 10, CENTER|RJUST, NULL),
				mkstring(buf4, 4, CENTER|RJUST, NULL),
				mkstring(buf5, 4, RJUST, NULL),
				buf);
		}
	}

	if (totalswap_pages)
		*totalswap_pages = totalswap;
	if (totalused_pages)
		*totalused_pages = totalused;

	return TRUE;
}

/*
 *  Determine the swap_info_struct usage.
 */
/*
 *  Determine once whether this kernel's swap_info[] is an array of
 *  swap_info_structs (SWAPINFO_V1) or of pointers to them
 *  (SWAPINFO_V2), and record the result in vt->flags.  Decided from
 *  the gdb type of the symbol, falling back to the kernel version
 *  (pointers as of 2.6.33).
 */
static void
swap_info_init(void)
{
	struct gnu_request *req;

	/* already determined */
	if (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
		return;

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));

	if ((get_symbol_type("swap_info", NULL, req) == TYPE_CODE_ARRAY) &&
	    ((req->target_typecode == TYPE_CODE_PTR) ||
	     (req->target_typecode == TYPE_CODE_STRUCT))) {
		switch (req->target_typecode)
		{
		case TYPE_CODE_STRUCT:
			vt->flags |= SWAPINFO_V1;
			break;
		case TYPE_CODE_PTR:
			vt->flags |= SWAPINFO_V2;
			break;
		}
	} else {
		/* no usable type info: decide by kernel version */
		if (THIS_KERNEL_VERSION >= LINUX(2,6,33))
			vt->flags |= SWAPINFO_V2;
		else
			vt->flags |= SWAPINFO_V1;
	}

	FREEBUF(req);
}

/*
 *  Translate a PTE into a swap device and offset string.
 */
char *
swap_location(ulonglong pte, char *buf)
{
	char swapdev[BUFSIZE];

	if (!pte)
		return NULL;

	/* 2.6+ kernels use the __swp_*() encoding; older kernels
	   use the SWP_*() macros with a hex offset */
	if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
		sprintf(buf, "%s OFFSET: %lld",
			get_swapdev(__swp_type(pte), swapdev),
			(ulonglong)__swp_offset(pte));
	else
		sprintf(buf, "%s OFFSET: %llx",
			get_swapdev(SWP_TYPE(pte), swapdev),
			(ulonglong)SWP_OFFSET(pte));

	return buf;
}

/*
 *  Given the type field from a PTE, return the name of the swap device.
 *  "buf" always receives a string -- "(unknown swap location)" when the
 *  type is out of range or the swap_info entry cannot be read.
 */
static char *
get_swapdev(ulong type, char *buf)
{
	unsigned int i, swap_info_len;
	ulong swap_info, swap_info_ptr, swap_file;
	ulong vfsmnt;

	if (!symbol_exists("nr_swapfiles"))
		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");

	if (!symbol_exists("swap_info"))
		error(FATAL, "swap_info doesn't exist in this kernel!\n");

	swap_info_init();

	swap_info = symbol_value("swap_info");

	/* prefer the cached array length; fall back to a gdb query */
	swap_info_len = (i = ARRAY_LENGTH(swap_info)) ?
		i : get_array_length("swap_info", NULL, 0);

	sprintf(buf, "(unknown swap location)");

	if (type >= swap_info_len)
		return buf;

	/* index into swap_info[] per the V1/V2 layout */
	switch (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
	{
	case SWAPINFO_V1:
		swap_info += type * SIZE(swap_info_struct);
		fill_swap_info(swap_info);
		break;

	case SWAPINFO_V2:
		swap_info += type * sizeof(void *);
		if (!readmem(swap_info, KVADDR, &swap_info_ptr,
		    sizeof(void *), "swap_info pointer",
		    RETURN_ON_ERROR|QUIET))
			return buf;
		if (!swap_info_ptr)
			return buf;
		fill_swap_info(swap_info_ptr);
		break;
	}

	swap_file = ULONG(vt->swap_info_struct +
		OFFSET(swap_info_struct_swap_file));

	if (swap_file) {
		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
			vfsmnt = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_swap_vfsmnt));
			get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
		} else if (VALID_MEMBER
			   (swap_info_struct_old_block_size)) {
			get_pathname(file_to_dentry(swap_file),
				buf, BUFSIZE, 1, 0);
		} else {
			get_pathname(swap_file, buf, BUFSIZE, 1, 0);
		}
	}

	return buf;
}

/*
 *  If not currently stashed, cache the passed-in swap_info_struct.
 *  The single-entry cache lives in vt->swap_info_struct, keyed by
 *  vt->last_swap_read.
 */
static void
fill_swap_info(ulong swap_info)
{
	if (vt->last_swap_read == swap_info)
		return;

	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
	    malloc(SIZE(swap_info_struct))))
		error(FATAL, "cannot malloc swap_info_struct space\n");

	readmem(swap_info, KVADDR, vt->swap_info_struct,
	    SIZE(swap_info_struct), "fill_swap_info", FAULT_ON_ERROR);

	vt->last_swap_read = swap_info;
}

/*
 *  If active, clear references to the swap_info references.
 *  (On a live system the cached struct may be stale.)
 */
void
clear_swap_info_cache(void)
{
	if (ACTIVE())
		vt->last_swap_read = 0;
}

/*
 *  Translate a vm_area_struct and virtual address into a filename
 *  and offset string.
 */

#define PAGE_CACHE_SHIFT (machdep->pageshift)  /* This is supposed to change!
 */
/*
 *  For a file-backed VMA, format "<pathname> OFFSET: <hex>" into buf,
 *  where the offset is the file offset of the page containing vaddr.
 *  Returns buf, or NULL when the VMA has no resolvable backing file.
 */
static char *
vma_file_offset(ulong vma, ulong vaddr, char *buf)
{
	ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset;
	ulong vfsmnt;
	char file[BUFSIZE];
	char *vma_buf, *file_buf;

	if (!vma)
		return NULL;

	vma_buf = fill_vma_cache(vma);

	vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));

	if (!vm_file)
		goto no_file_offset;

	file_buf = fill_file_cache(vm_file);
	dentry = ULONG(file_buf + OFFSET(file_f_dentry));

	if (!dentry)
		goto no_file_offset;

	file[0] = NULLCHAR;
	if (VALID_MEMBER(file_f_vfsmnt)) {
		vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
		get_pathname(dentry, file, BUFSIZE, 1, vfsmnt);
	} else
		get_pathname(dentry, file, BUFSIZE, 1, 0);

	if (!strlen(file))
		goto no_file_offset;

	vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));

	/* 0xdeadbeef marks "member not present" below */
	vm_offset = vm_pgoff = 0xdeadbeef;

	/* old kernels store a byte offset (vm_offset); newer ones a
	   page offset (vm_pgoff) */
	if (VALID_MEMBER(vm_area_struct_vm_offset))
		vm_offset = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_offset));
	else if (VALID_MEMBER(vm_area_struct_vm_pgoff))
		vm_pgoff = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_pgoff));
	else
		goto no_file_offset;

	offset = 0;
	if (vm_offset != 0xdeadbeef)
		offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset;
	else if (vm_pgoff != 0xdeadbeef) {
		offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
		offset <<= PAGE_CACHE_SHIFT;
	}

	sprintf(buf, "%s OFFSET: %lx", file, offset);

	return buf;

no_file_offset:
	return NULL;
}

/*
 *  Translate a PTE into its physical address and flags.
 */
void
cmd_pte(void)
{
	int c;
	ulonglong pte;

	/* "pte" accepts no options -- only PTE value arguments */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* translate each PTE argument via the machine-dependent handler */
	while (args[optind]) {
		pte = htoll(args[optind], FAULT_ON_ERROR, NULL);
		machdep->translate_pte((ulong)pte, NULL, pte);
		optind++;
	}
}

/* column header stub shared by the memory-node display code */
static char *node_zone_hdr = "ZONE NAME SIZE";

/*
 *  On systems supporting memory nodes, display the basic per-node data.
*/ static void dump_memory_nodes(int initialize) { int i, j; int n, id, node, flen, slen, badaddr; ulong node_mem_map; ulong temp_node_start_paddr; ulonglong node_start_paddr; ulong node_start_pfn; ulong node_start_mapnr; ulong node_spanned_pages, node_present_pages; ulong free_pages, zone_size, node_size, cum_zone_size; ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; physaddr_t phys; ulong pp; ulong zone_start_pfn; ulong bdata; ulong pgdat; ulong node_zones; ulong value; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; struct node_table *nt; node = slen = 0; if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) { nt = &vt->node_table[0]; nt->node_id = 0; if (symbol_exists("contig_page_data")) nt->pgdat = symbol_value("contig_page_data"); else nt->pgdat = 0; nt->size = vt->total_pages; nt->mem_map = vt->mem_map; nt->start_paddr = 0; nt->start_mapnr = 0; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", 0); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); } return; } if (initialize) { pgdat = UNINITIALIZED; /* * This order may have to change based upon architecture... 
*/ if (symbol_exists("pgdat_list") && (VALID_MEMBER(pglist_data_node_next) || VALID_MEMBER(pglist_data_pgdat_next))) { get_symbol_data("pgdat_list", sizeof(void *), &pgdat); vt->flags &= ~NODES_ONLINE; } else if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(0)) < 0) { error(WARNING, "cannot determine first node from node_online_map\n\n"); return; } if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture\n\n"); return; } } } else pgdat = vt->node_table[0].pgdat; if (initialize && (pgdat == UNINITIALIZED)) { error(WARNING, "cannot initialize pgdat list\n\n"); return; } for (n = 0, badaddr = FALSE; pgdat; n++) { if (n >= vt->numnodes) error(FATAL, "numnodes out of sync with pgdat_list?\n"); nt = &vt->node_table[n]; readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, sizeof(int), "pglist node_id", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &node_mem_map, sizeof(ulong), "node_mem_map", FAULT_ON_ERROR); } else { node_mem_map = BADADDR; badaddr = TRUE; } if (VALID_MEMBER(pglist_data_node_start_paddr)) { readmem(pgdat+OFFSET(pglist_data_node_start_paddr), KVADDR, &temp_node_start_paddr, sizeof(ulong), "pglist node_start_paddr", FAULT_ON_ERROR); node_start_paddr = temp_node_start_paddr; } else if (VALID_MEMBER(pglist_data_node_start_pfn)) { readmem(pgdat+OFFSET(pglist_data_node_start_pfn), KVADDR, &node_start_pfn, sizeof(ulong), "pglist node_start_pfn", FAULT_ON_ERROR); node_start_mapnr = node_start_pfn; node_start_paddr = PTOB(node_start_pfn); if (badaddr && IS_SPARSEMEM()) { if (!verify_pfn(node_start_pfn)) error(WARNING, "questionable node_start_pfn: %lx\n", node_start_pfn); phys = PTOB(node_start_pfn); if (phys_to_page(phys, &pp)) node_mem_map = pp; } } else error(INFO, "cannot determine zone starting physical address\n"); if (VALID_MEMBER(pglist_data_node_start_mapnr)) readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), 
KVADDR, &node_start_mapnr, sizeof(ulong), "pglist node_start_mapnr", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_size)) readmem(pgdat+OFFSET(pglist_data_node_size), KVADDR, &node_size, sizeof(ulong), "pglist node_size", FAULT_ON_ERROR); else if (VALID_MEMBER(pglist_data_node_spanned_pages)) { readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), KVADDR, &node_spanned_pages, sizeof(ulong), "pglist node_spanned_pages", FAULT_ON_ERROR); node_size = node_spanned_pages; } else error(INFO, "cannot determine zone size\n"); if (VALID_MEMBER(pglist_data_node_present_pages)) readmem(pgdat+OFFSET(pglist_data_node_present_pages), KVADDR, &node_present_pages, sizeof(ulong), "pglist node_present_pages", FAULT_ON_ERROR); else node_present_pages = 0; if (VALID_MEMBER(pglist_data_bdata)) readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); else bdata = BADADDR; if (initialize) { nt->node_id = id; nt->pgdat = pgdat; if (VALID_MEMBER(zone_struct_memsize)) nt->size = 0; /* initialize below */ else nt->size = node_size; nt->present = node_present_pages; nt->mem_map = node_mem_map; nt->start_paddr = node_start_paddr; nt->start_mapnr = node_start_mapnr; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", n); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); } } if (!initialize) { if (n) { fprintf(fp, "\n"); pad_line(fp, slen, '-'); } flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA")); fprintf(fp, "%sNODE %s %s %s %s\n", n ? 
"\n\n" : "", mkstring(buf1, 8, CENTER, "SIZE"), mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); node_zones = pgdat + OFFSET(pglist_data_node_zones); sprintf(buf5, " %2d %s %s %s %s\n", id, mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, MKSTR(node_size)), mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, MKSTR(pgdat)), bdata == BADADDR ? mkstring(buf3, flen, CENTER, "----") : mkstring(buf3, flen, CENTER|LONG_HEX, MKSTR(bdata)), mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, MKSTR(node_zones))); fprintf(fp, "%s", buf5); j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + count_leading_spaces(buf4); for (i = 1; i < vt->nr_zones; i++) { node_zones += SIZE_OPTION(zone_struct, zone); INDENT(j); fprintf(fp, "%lx\n", node_zones); } fprintf(fp, "%s START_PADDR START_MAPNR\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(node_mem_map)), mkstring(buf2, strlen(" START_PADDR "), CENTER|LONGLONG_HEX|RJUST, MKSTR(&node_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(node_start_mapnr))); sprintf(buf2, "%s %s START_PADDR START_MAPNR", node_zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "MEM_MAP")); slen = strlen(buf2); fprintf(fp, "\n%s\n", buf2); } node_zones = pgdat + OFFSET(pglist_data_node_zones); cum_zone_size = 0; for (i = 0; i < vt->nr_zones; i++) { if (CRASHDEBUG(7)) fprintf(fp, "zone %d at %lx\n", i, node_zones); if (VALID_MEMBER(zone_struct_size)) readmem(node_zones+OFFSET(zone_struct_size), KVADDR, &zone_size, sizeof(ulong), "zone_struct size", FAULT_ON_ERROR); else if (VALID_MEMBER(zone_struct_memsize)) { readmem(node_zones+OFFSET(zone_struct_memsize), KVADDR, &zone_size, sizeof(ulong), "zone_struct memsize", FAULT_ON_ERROR); nt->size += zone_size; } else if (VALID_MEMBER(zone_spanned_pages)) { readmem(node_zones+ OFFSET(zone_spanned_pages), KVADDR, 
&zone_size, sizeof(ulong), "zone spanned_pages", FAULT_ON_ERROR); } else error(FATAL, "zone_struct has neither size nor memsize field\n"); readmem(node_zones+ OFFSET_OPTION(zone_struct_free_pages, zone_free_pages), KVADDR, &free_pages, sizeof(ulong), "zone[_struct] free_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET_OPTION(zone_struct_name, zone_name), KVADDR, &value, sizeof(void *), "zone[_struct] name", FAULT_ON_ERROR); if (!read_string(value, buf1, BUFSIZE-1)) sprintf(buf1, "(unknown) "); if (VALID_STRUCT(zone_struct)) { if (VALID_MEMBER(zone_struct_zone_start_paddr)) { readmem(node_zones+OFFSET (zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_struct_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); } readmem(node_zones+ OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (IS_SPARSEMEM()) { zone_mem_map = 0; zone_start_mapnr = 0; if (zone_size) { phys = PTOB(zone_start_pfn); zone_start_mapnr = phys/PAGESIZE(); } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); } else readmem(node_zones+ OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (zone_mem_map) zone_start_mapnr = (zone_mem_map - node_mem_map) / SIZE(page); else if (!IS_SPARSEMEM()) zone_start_mapnr = 0; } if (IS_SPARSEMEM()) { zone_mem_map = 0; if (zone_size) { phys = 
PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_struct_zone_mem_map) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); else zone_mem_map = 0; } else readmem(node_zones+ OFFSET_OPTION(zone_struct_zone_mem_map, zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (!initialize) { fprintf(fp, " %2d %-9s %7ld ", i, buf1, zone_size); cum_zone_size += zone_size; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), LONG_HEX|RJUST,MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); } node_zones += SIZE_OPTION(zone_struct, zone); } if (initialize) { if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(node+1)) < 0) pgdat = 0; else if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", node); pgdat = 0; } } else readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } else { if ((n+1) < vt->numnodes) pgdat = vt->node_table[n+1].pgdat; else pgdat = 0; } } if (n != vt->numnodes) { if (CRASHDEBUG(2)) error(NOTE, "changing numnodes from %d to %d\n", vt->numnodes, n); vt->numnodes = n; } if (!initialize && IS_SPARSEMEM()) dump_mem_sections(); } /* * At least verify that page-shifted physical address. 
*/
static int
verify_pfn(ulong pfn)
{
	int i;
	physaddr_t mask;

	/* No architecture-declared physical address width: nothing to check. */
	if (!machdep->max_physmem_bits)
		return TRUE;

	/*
	 *  Build a mask of all physical-address bits above max_physmem_bits;
	 *  if the page-shifted pfn sets any of them, it is out of range.
	 */
	mask = 0;
	for (i = machdep->max_physmem_bits; i < machdep->bits; i++)
		mask |= ((physaddr_t)1 << i);

	if (mask & PTOB(pfn))
		return FALSE;

	return TRUE;
}

/*
 *  Display the per-zone statistics of every memory node, reading each
 *  zone[_struct] into a scratch buffer and decoding whichever members
 *  this kernel version provides (sizes, watermarks, vm_stat counters).
 */
static void
dump_zone_stats(void)
{
	int i, n;
	ulong pgdat, node_zones;
	char *zonebuf;
	char buf1[BUFSIZE];
	int ivalue;
	ulong value1;
	ulong value2;
	ulong value3;
	ulong value4;
	ulong value5;
	ulong value6;
	long min, low, high;

	value1 = value2 = value3 = value4 = value5 = value6 = 0;
	min = low = high = 0;
	pgdat = vt->node_table[0].pgdat;
	/* One scratch buffer sized for whichever zone type this kernel has. */
	zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone));
	vm_stat_init();

	for (n = 0; pgdat; n++) {
		node_zones = pgdat + OFFSET(pglist_data_node_zones);

		for (i = 0; i < vt->nr_zones; i++) {
			if (!readmem(node_zones, KVADDR, zonebuf,
			    SIZE_OPTION(zone_struct, zone),
			    "zone buffer", FAULT_ON_ERROR))
				break;

			/* Zone name string pointer -> buf1. */
			value1 = ULONG(zonebuf +
				OFFSET_OPTION(zone_struct_name, zone_name));
			if (!read_string(value1, buf1, BUFSIZE-1))
				sprintf(buf1, "(unknown) ");

			/*
			 *  value1 = zone size, value6 = present pages;
			 *  member names vary by kernel version.
			 */
			if (VALID_MEMBER(zone_struct_size))
				value1 = value6 = ULONG(zonebuf +
					OFFSET(zone_struct_size));
			else if (VALID_MEMBER(zone_struct_memsize)) {
				value1 = value6 = ULONG(zonebuf +
					OFFSET(zone_struct_memsize));
			} else if (VALID_MEMBER(zone_spanned_pages)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_spanned_pages));
				value6 = ULONG(zonebuf +
					OFFSET(zone_present_pages));
			} else error(FATAL,
				"zone struct has unknown size field\n");

			/*
			 *  Watermarks: newer kernels keep a watermark[] array
			 *  indexed by the WMARK_* enumerators; fall back to
			 *  0/1/2 if the enumerators cannot be resolved.
			 */
			if (VALID_MEMBER(zone_watermark)) {
				if (!enumerator_value("WMARK_MIN", &min) ||
				    !enumerator_value("WMARK_LOW", &low) ||
				    !enumerator_value("WMARK_HIGH", &high)) {
					min = 0;
					low = 1;
					high = 2;
				}
				value2 = ULONG(zonebuf + OFFSET(zone_watermark) +
					(sizeof(long) * min));
				value3 = ULONG(zonebuf + OFFSET(zone_watermark) +
					(sizeof(long) * low));
				value4 = ULONG(zonebuf + OFFSET(zone_watermark) +
					(sizeof(long) * high));
			} else {
				/* Older kernels: discrete pages_min/low/high. */
				value2 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_min,
					zone_struct_pages_min));
				value3 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_low,
					zone_struct_pages_low));
				value4 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_high,
					zone_struct_pages_high));
			}
			value5 = ULONG(zonebuf + OFFSET_OPTION(zone_free_pages,
				zone_struct_free_pages));

			fprintf(fp,
			    "NODE: %d ZONE: %d ADDR: %lx NAME: \"%s\"\n",
				n, i, node_zones, buf1);

			if (!value1) {
				fprintf(fp, " [unpopulated]\n");
				goto next_zone;
			}

			fprintf(fp, " SIZE: %ld", value1);
			if (value6 < value1)
				fprintf(fp, " PRESENT: %ld", value6);
			fprintf(fp, " MIN/LOW/HIGH: %ld/%ld/%ld",
				value2, value3, value4);

			/* Prefer the zone's vm_stat NR_FREE_PAGES counter. */
			if (VALID_MEMBER(zone_vm_stat))
				dump_vm_stat("NR_FREE_PAGES", (long *)&value5,
					node_zones + OFFSET(zone_vm_stat));

			if (VALID_MEMBER(zone_nr_active) &&
			    VALID_MEMBER(zone_nr_inactive)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_nr_active));
				value2 = ULONG(zonebuf +
					OFFSET(zone_nr_inactive));
				fprintf(fp,
				    "\n NR_ACTIVE: %ld NR_INACTIVE: %ld FREE: %ld\n",
					value1, value2, value5);
				if (VALID_MEMBER(zone_vm_stat)) {
					fprintf(fp, " VM_STAT:\n");
					dump_vm_stat(NULL, NULL, node_zones +
						OFFSET(zone_vm_stat));
				}
			} else if (VALID_MEMBER(zone_vm_stat) &&
			    dump_vm_stat("NR_ACTIVE", (long *)&value1,
			    node_zones + OFFSET(zone_vm_stat)) &&
			    dump_vm_stat("NR_INACTIVE", (long *)&value2,
			    node_zones + OFFSET(zone_vm_stat))) {
				fprintf(fp, "\n VM_STAT:\n");
				dump_vm_stat(NULL, NULL, node_zones +
					OFFSET(zone_vm_stat));
			} else {
				if (VALID_MEMBER(zone_vm_stat)) {
					fprintf(fp, "\n VM_STAT:\n");
					dump_vm_stat(NULL, NULL, node_zones +
						OFFSET(zone_vm_stat));
				} else
					fprintf(fp, " FREE: %ld\n", value5);
			}

			/*
			 *  all_unreclaimable: either a dedicated member, or a
			 *  bit in zone.flags selected by the enumerator.
			 */
			if (VALID_MEMBER(zone_all_unreclaimable)) {
				ivalue = UINT(zonebuf +
					OFFSET(zone_all_unreclaimable));
				fprintf(fp, " ALL_UNRECLAIMABLE: %s ",
					ivalue ? "yes" : "no");
			} else if (VALID_MEMBER(zone_flags) &&
				enumerator_value("ZONE_ALL_UNRECLAIMABLE",
				(long *)&value1)) {
				value2 = ULONG(zonebuf + OFFSET(zone_flags));
				value3 = value2 & (1 << value1);
				fprintf(fp, " ALL_UNRECLAIMABLE: %s ",
					value3 ? "yes" : "no");
			}

			if (VALID_MEMBER(zone_pages_scanned)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_pages_scanned));
				fprintf(fp, "PAGES_SCANNED: %lu ", value1);
			}
			fprintf(fp, "\n");

next_zone:
			fprintf(fp, "\n");
			node_zones += SIZE_OPTION(zone_struct, zone);
		}

		if ((n+1) < vt->numnodes)
			pgdat = vt->node_table[n+1].pgdat;
		else
			pgdat = 0;
	}

	FREEBUF(zonebuf);
}

/*
 *  Gather essential information regarding each memory node.
 */
static void
node_table_init(void)
{
	int n;
	ulong pgdat;

	/*
	 *  Override numnodes -- some kernels may leave it at 1 on a system
	 *  with multiple memory nodes, so count the pgdat_list entries.
	 */
	if ((vt->flags & NODES) &&
	    (VALID_MEMBER(pglist_data_node_next) ||
	     VALID_MEMBER(pglist_data_pgdat_next))) {

		get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
		for (n = 0; pgdat; n++) {
			readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
				pglist_data_pgdat_next), KVADDR,
				&pgdat, sizeof(void *), "pglist_data node_next",
				FAULT_ON_ERROR);
		}
		if (n != vt->numnodes) {
			if (CRASHDEBUG(2))
				error(NOTE, "changing numnodes from %d to %d\n",
					vt->numnodes, n);
			vt->numnodes = n;
		}
	} else
		vt->flags &= ~NODES;

	if (!(vt->node_table = (struct node_table *)
	    malloc(sizeof(struct node_table) * vt->numnodes)))
		error(FATAL, "cannot malloc node_table %s(%d nodes)",
			vt->numnodes > 1 ? "array " : "", vt->numnodes);

	BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes);

	/* Fill node_table[] from the kernel's pgdat list ... */
	dump_memory_nodes(MEMORY_NODES_INITIALIZE);

	/* ... then keep the entries sorted by node id. */
	qsort((void *)vt->node_table, (size_t)vt->numnodes,
		sizeof(struct node_table), compare_node_data);

	if (CRASHDEBUG(2))
		dump_memory_nodes(MEMORY_NODES_DUMP);
}

/*
 * The comparison function must return an integer less than,
 * equal to, or greater than zero if the first argument is
 * considered to be respectively less than, equal to, or
 * greater than the second.  If two members compare as equal,
 * their order in the sorted array is undefined.
*/ static int compare_node_data(const void *v1, const void *v2) { struct node_table *t1, *t2; t1 = (struct node_table *)v1; t2 = (struct node_table *)v2; return (t1->node_id < t2->node_id ? -1 : t1->node_id == t2->node_id ? 0 : 1); } /* * Depending upon the processor, and whether we're running live or on a * dumpfile, get the system page size. */ uint memory_page_size(void) { uint psz; if (machdep->pagesize) return machdep->pagesize; if (REMOTE_MEMSRC()) return remote_page_size(); switch (pc->flags & MEMORY_SOURCES) { case DISKDUMP: psz = diskdump_page_size(); break; case XENDUMP: psz = xendump_page_size(); break; case KDUMP: psz = kdump_page_size(); break; case NETDUMP: psz = netdump_page_size(); break; case MCLXCD: psz = (uint)mclx_page_size(); break; case LKCD: #if 0 /* REMIND: */ psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ #else psz = (uint)getpagesize(); #endif break; case DEVMEM: case MEMMOD: case CRASHBUILTIN: case KVMDUMP: case PROC_KCORE: psz = (uint)getpagesize(); break; case S390D: psz = s390_page_size(); break; case SADUMP: psz = sadump_page_size(); break; case VMWARE_VMSS: psz = vmware_vmss_page_size(); break; default: psz = 0; error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", pc->flags & MEMORY_SOURCES); } return psz; } /* * If the page size cannot be determined by the dumpfile (like kdump), * and the processor default cannot be used, allow the force-feeding * of a crash command-line page size option. 
*/ void force_page_size(char *s) { int k, err; ulong psize; k = 1; err = FALSE; psize = 0; switch (LASTCHAR(s)) { case 'k': case 'K': LASTCHAR(s) = NULLCHAR; if (!decimal(s, 0)) { err = TRUE; break; } k = 1024; /* FALLTHROUGH */ default: if (decimal(s, 0)) psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); else if (hexadecimal(s, 0)) psize = htol(s, QUIET|RETURN_ON_ERROR, &err); else err = TRUE; break; } if (err) error(INFO, "invalid page size: %s\n", s); else machdep->pagesize = psize * k; } /* * Return the vmalloc address referenced by the first vm_struct * on the vmlist. This can normally be used by the machine-specific * xxx_vmalloc_start() routines. */ ulong first_vmalloc_address(void) { static ulong vmalloc_start = 0; ulong vm_struct, vmap_area; if (DUMPFILE() && vmalloc_start) return vmalloc_start; if (vt->flags & USE_VMAP_AREA) { get_symbol_data("vmap_area_list", sizeof(void *), &vmap_area); if (!vmap_area) return 0; if (!readmem(vmap_area - OFFSET(vmap_area_list) + OFFSET(vmap_area_va_start), KVADDR, &vmalloc_start, sizeof(void *), "first vmap_area va_start", RETURN_ON_ERROR)) non_matching_kernel(); } else if (kernel_symbol_exists("vmlist")) { get_symbol_data("vmlist", sizeof(void *), &vm_struct); if (!vm_struct) return 0; if (!readmem(vm_struct+OFFSET(vm_struct_addr), KVADDR, &vmalloc_start, sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) non_matching_kernel(); } return vmalloc_start; } /* * Return the highest vmalloc address in the vmlist. */ ulong last_vmalloc_address(void) { struct meminfo meminfo; static ulong vmalloc_limit = 0; if (!vmalloc_limit || ACTIVE()) { BZERO(&meminfo, sizeof(struct meminfo)); meminfo.memtype = KVADDR; meminfo.spec_addr = 0; meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); dump_vmlist(&meminfo); vmalloc_limit = meminfo.retval; } return vmalloc_limit; } /* * Determine whether an identity-mapped virtual address * refers to an existant physical page, and if not bump * it up to the next node. 
*/ static int next_identity_mapping(ulong vaddr, ulong *nextvaddr) { int n, retval; struct node_table *nt; ulonglong paddr, pstart, psave, pend; ulong node_size; paddr = VTOP(vaddr); psave = 0; retval = FALSE; for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; pstart = nt->start_paddr; pend = pstart + ((ulonglong)node_size * PAGESIZE()); /* * Check the next node. */ if (paddr >= pend) continue; /* * Bump up to the next node, but keep looking in * case of non-sequential nodes. */ if (paddr < pstart) { if (psave && (psave < pstart)) continue; *nextvaddr = PTOV(pstart); psave = pstart; retval = TRUE; continue; } /* * We're in the physical range. */ *nextvaddr = vaddr; retval = TRUE; break; } return retval; } /* * Return the L1 cache size in bytes, which can be found stored in the * cache_cache. */ int l1_cache_size(void) { ulong cache; ulong c_align; int colour_off; int retval; retval = -1; if (VALID_MEMBER(kmem_cache_s_c_align)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_c_align), KVADDR, &c_align, sizeof(ulong), "c_align", FAULT_ON_ERROR); retval = (int)c_align; } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_colour_off), KVADDR, &colour_off, sizeof(int), "colour_off", FAULT_ON_ERROR); retval = colour_off; } return retval; } /* * Multi-purpose routine used to query/control dumpfile memory usage. 
int
dumpfile_memory(int cmd)
{
	int retval;

	retval = 0;

	/* Forward the request to whichever dumpfile backend is active. */
	switch (cmd)
	{
	case DUMPFILE_MEM_USED:
		if (REMOTE_DUMPFILE())
			retval = remote_memory_used();
		else if (pc->flags & NETDUMP)
			retval = netdump_memory_used();
		else if (pc->flags & KDUMP)
			retval = kdump_memory_used();
		else if (pc->flags & XENDUMP)
			retval = xendump_memory_used();
		else if (pc->flags & KVMDUMP)
			retval = kvmdump_memory_used();
		else if (pc->flags & DISKDUMP)
			retval = diskdump_memory_used();
		else if (pc->flags & LKCD)
			retval = lkcd_memory_used();
		else if (pc->flags & MCLXCD)
			retval = vas_memory_used();
		else if (pc->flags & S390D)
			retval = s390_memory_used();
		else if (pc->flags & SADUMP)
			retval = sadump_memory_used();
		break;

	case DUMPFILE_FREE_MEM:
		if (REMOTE_DUMPFILE())
			retval = remote_free_memory();
		else if (pc->flags & NETDUMP)
			retval = netdump_free_memory();
		else if (pc->flags & KDUMP)
			retval = kdump_free_memory();
		else if (pc->flags & XENDUMP)
			retval = xendump_free_memory();
		else if (pc->flags & KVMDUMP)
			retval = kvmdump_free_memory();
		else if (pc->flags & DISKDUMP)
			retval = diskdump_free_memory();
		else if (pc->flags & LKCD)
			retval = lkcd_free_memory();
		else if (pc->flags & MCLXCD)
			retval = vas_free_memory(NULL);
		else if (pc->flags & S390D)
			retval = s390_free_memory();
		else if (pc->flags & SADUMP)
			retval = sadump_free_memory();
		break;

	case DUMPFILE_MEM_DUMP:
		if (REMOTE_DUMPFILE())
			retval = remote_memory_dump(0);
		else if (pc->flags & NETDUMP)
			retval = netdump_memory_dump(fp);
		else if (pc->flags & KDUMP)
			retval = kdump_memory_dump(fp);
		else if (pc->flags & XENDUMP)
			retval = xendump_memory_dump(fp);
		else if (pc->flags & KVMDUMP)
			retval = kvmdump_memory_dump(fp);
		else if (pc->flags & DISKDUMP)
			retval = diskdump_memory_dump(fp);
		else if (pc->flags & LKCD)
			retval = lkcd_memory_dump(set_lkcd_fp(fp));
		else if (pc->flags & MCLXCD)
			retval = vas_memory_dump(fp);
		else if (pc->flags & S390D)
			retval = s390_memory_dump(fp);
		else if (pc->flags & PROC_KCORE)
			retval = kcore_memory_dump(fp);
		else if (pc->flags & SADUMP)
			retval = sadump_memory_dump(fp);
		break;

	case DUMPFILE_ENVIRONMENT:
		if (pc->flags & LKCD) {
			set_lkcd_fp(fp);
			dump_lkcd_environment(0);
		} else if (pc->flags & REM_LKCD)
			retval = remote_memory_dump(VERBOSE);
		break;
	}

	return retval;
}

/*
 *  Functions for sparse mem support
 */
ulong
sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr)
{
	/* Translate a section's coded mem_map into the page address of
	   the section's first pfn. */
	return coded_mem_map +
	    (section_nr_to_pfn(section_nr) * SIZE(page));
}

/*
 *  Set up the mem_section root table cache and the per-section scratch
 *  buffer, distinguishing SPARSEMEM from SPARSEMEM_EXTREME layouts.
 */
void
sparse_mem_init(void)
{
	ulong addr;
	ulong mem_section_size;
	int len, dimension;

	if (!IS_SPARSEMEM())
		return;

	MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section",
		"section_mem_map");

	if (!MAX_PHYSMEM_BITS())
		error(FATAL,
		    "CONFIG_SPARSEMEM kernels not supported for this architecture\n");

	/*
	 *  SPARSEMEM_EXTREME: mem_section[] is an array of root pointers
	 *  (or, on recent kernels, has no known array dimension).
	 */
	if (((len = get_array_length("mem_section", &dimension, 0)) ==
	    (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || !dimension)
		vt->flags |= SPARSEMEM_EX;

	if (IS_SPARSEMEM_EX()) {
		machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME();
		mem_section_size = sizeof(void *) * NR_SECTION_ROOTS();
	} else {
		machdep->sections_per_root = _SECTIONS_PER_ROOT();
		mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS();
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "PAGESIZE=%d\n",PAGESIZE());
		fprintf(fp,"mem_section_size = %ld\n", mem_section_size);
		fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS());
		fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS());
		fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() );
		fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK());
		fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION());
		if (IS_SPARSEMEM_EX() && !len)
			error(WARNING, "SPARSEMEM_EX: questionable section values\n");
	}

	if (!(vt->mem_sec = (void *)malloc(mem_section_size)))
		error(FATAL, "cannot malloc mem_sec cache\n");
	if (!(vt->mem_section = (char *)malloc(SIZE(mem_section))))
		error(FATAL, "cannot malloc mem_section cache\n");

	/* Cache the whole root table; individual sections are read on
	   demand by read_mem_section(). */
	addr = symbol_value("mem_section");
	readmem(addr, KVADDR,vt->mem_sec ,mem_section_size,
		"memory section root table", FAULT_ON_ERROR);
}

/*
 *  Read one mem_section structure into the shared vt->mem_section
 *  scratch buffer; returns NULL for a zero/invalid address.
 */
char *
read_mem_section(ulong addr)
{
	if ((addr == 0) || !IS_KVADDR(addr))
		return 0;

	readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section),
		"memory section", FAULT_ON_ERROR);

	return vt->mem_section;
}

/*
 *  Translate a section number into the kernel address of its
 *  mem_section structure, or 0 if the section does not exist.
 */
ulong
nr_to_section(ulong nr)
{
	ulong addr;
	ulong *mem_sec = vt->mem_sec;

	if (IS_SPARSEMEM_EX()) {
		if (SECTION_NR_TO_ROOT(nr) >= NR_SECTION_ROOTS()) {
			/* Suppress the warning for commands that probe
			   arbitrary addresses. */
			if (!STREQ(pc->curcmd, "rd") &&
			    !STREQ(pc->curcmd, "search") &&
			    !STREQ(pc->curcmd, "kmem"))
				error(WARNING,
				    "sparsemem: invalid section number: %ld\n",
					nr);
			return 0;
		}
	}

	if (IS_SPARSEMEM_EX()) {
		/* Indirect through the root pointer table. */
		if ((mem_sec[SECTION_NR_TO_ROOT(nr)] == 0) ||
		    !IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)]))
			return 0;
		addr = mem_sec[SECTION_NR_TO_ROOT(nr)] +
		    (nr & SECTION_ROOT_MASK()) * SIZE(mem_section);
	} else
		/* Flat mem_section[] array: index directly. */
		addr = symbol_value("mem_section") +
		    (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) +
		    (nr & SECTION_ROOT_MASK())) * SIZE(mem_section);

	if (!IS_KVADDR(addr))
		return 0;

	return addr;
}

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
*/ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_MAP_LAST_BIT (1UL<<2) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) int valid_section(ulong addr) { char *mem_section; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) && SECTION_MARKED_PRESENT); return 0; } int section_has_mem_map(ulong addr) { char *mem_section; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) && SECTION_HAS_MEM_MAP); return 0; } ulong section_mem_map_addr(ulong addr) { char *mem_section; ulong map; if ((mem_section = read_mem_section(addr))) { map = ULONG(mem_section + OFFSET(mem_section_section_mem_map)); map &= SECTION_MAP_MASK; return map; } return 0; } ulong valid_section_nr(ulong nr) { ulong addr = nr_to_section(nr); if (valid_section(addr)) return addr; return 0; } ulong pfn_to_map(ulong pfn) { ulong section, page_offset; ulong section_nr; ulong coded_mem_map, mem_map; section_nr = pfn_to_section_nr(pfn); if (!(section = valid_section_nr(section_nr))) return 0; if (section_has_mem_map(section)) { page_offset = pfn - section_nr_to_pfn(section_nr); coded_mem_map = section_mem_map_addr(section); mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) + (page_offset * SIZE(page)); return mem_map; } return 0; } void dump_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections; ulong coded_mem_map, mem_map, pfn; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; nr_mem_sections = NR_MEM_SECTIONS(); fprintf(fp, "\n"); pad_line(fp, BITS32() ? 
59 : 67, '-'); fprintf(fp, "\n\nNR %s %s %s PFN\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"), mkstring(buf2, MAX(VADDR_PRLEN,strlen("CODED_MEM_MAP")), CENTER|LJUST, "CODED_MEM_MAP"), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); for (nr = 0; nr < nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr); mem_map = sparse_decode_mem_map(coded_mem_map,nr); pfn = section_nr_to_pfn(nr); fprintf(fp, "%2ld %s %s %s %s\n", nr, mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(addr)), mkstring(buf2, MAX(VADDR_PRLEN, strlen("CODED_MEM_MAP")), CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)), mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(mem_map)), pc->output_radix == 10 ? mkstring(buf4, VADDR_PRLEN, LONG_DEC|LJUST, MKSTR(pfn)) : mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(pfn))); } } } void list_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections = NR_MEM_SECTIONS(); ulong coded_mem_map; for (nr = 0; nr <= nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr); fprintf(fp, "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", nr, addr, coded_mem_map, section_nr_to_pfn(nr), sparse_decode_mem_map(coded_mem_map,nr)); } } } /* * For kernels containing the node_online_map or node_states[], * return the number of online node bits set. */ static int get_nodes_online(void) { int i, len, online; struct gnu_request req; ulong *maskptr; long N_ONLINE; ulong mapaddr; if (!symbol_exists("node_online_map") && !symbol_exists("node_states")) return 0; len = mapaddr = 0; if (symbol_exists("node_online_map")) { if (LKCD_KERNTYPES()) { if ((len = STRUCT_SIZE("nodemask_t")) < 0) error(FATAL, "cannot determine type nodemask_t\n"); mapaddr = symbol_value("node_online_map"); } else { len = get_symbol_type("node_online_map", NULL, &req) == TYPE_CODE_UNDEF ? 
sizeof(ulong) : req.length; mapaddr = symbol_value("node_online_map"); } } else if (symbol_exists("node_states")) { if ((get_symbol_type("node_states", NULL, &req) != TYPE_CODE_ARRAY) || !(len = get_array_length("node_states", NULL, 0)) || !enumerator_value("N_ONLINE", &N_ONLINE)) return 0; len = req.length / len; mapaddr = symbol_value("node_states") + (N_ONLINE * len); } if (!(vt->node_online_map = (ulong *)malloc(len))) error(FATAL, "cannot malloc node_online_map\n"); if (!readmem(mapaddr, KVADDR, (void *)&vt->node_online_map[0], len, "node_online_map", QUIET|RETURN_ON_ERROR)) error(FATAL, "cannot read node_online_map/node_states\n"); vt->node_online_map_len = len/sizeof(ulong); online = 0; maskptr = (ulong *)vt->node_online_map; for (i = 0; i < vt->node_online_map_len; i++, maskptr++) online += count_bits_long(*maskptr); if (CRASHDEBUG(1)) { fprintf(fp, "node_online_map: ["); for (i = 0; i < vt->node_online_map_len; i++) fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); fprintf(fp, "] -> nodes online: %d\n", online); } if (online) vt->numnodes = online; return online; } /* * Return the next node index, with "first" being the first acceptable node. */ static int next_online_node(int first) { int i, j, node; ulong mask, *maskptr; if ((first/BITS_PER_LONG) >= vt->node_online_map_len) { error(INFO, "next_online_node: %d is too large!\n", first); return -1; } maskptr = (ulong *)vt->node_online_map; for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { mask = *maskptr; for (j = 0; j < BITS_PER_LONG; j++, node++) { if (mask & 1) { if (node >= first) return node; } mask >>= 1; } } return -1; } /* * Modify appropriately for architecture/kernel nuances. 
*/ static ulong next_online_pgdat(int node) { char buf[BUFSIZE]; ulong pgdat; /* * Default -- look for type: struct pglist_data node_data[] */ if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists("node_data")) goto pgdat2; /* * Just index into node_data[] without checking that it is * an array; kerntypes have no such symbol information. */ } else { if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY) goto pgdat2; open_tmpfile(); sprintf(buf, "whatis node_data"); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat2; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat2; } if (!readmem(symbol_value("node_data") + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat2; return pgdat; pgdat2: if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists("pgdat_list")) goto pgdat3; } else { if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY) goto pgdat3; open_tmpfile(); sprintf(buf, "whatis pgdat_list"); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat3; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat3; } if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat3; return pgdat; pgdat3: if (symbol_exists("contig_page_data") && (node == 0)) return symbol_value("contig_page_data"); return 0; } /* * Make the vm_stat[] array contents easily accessible. 
*/
/*
 * One-time setup: size the vm_stat[] array, then cache the name of
 * each zone_stat_item enumerator in vt->vm_stat_items[], indexed by
 * enumerator value.  Sets VM_STAT in vt->flags on success; on failure
 * marks vt->nr_vm_stat_items = -1 so the probe is not retried.
 */
static int
vm_stat_init(void)
{
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int i, count, stringlen, total;
	int c ATTRIBUTE_UNUSED;
	struct gnu_request *req;
	char *start;
	long enum_value;

	if (vt->flags & VM_STAT)
		return TRUE;

	if ((vt->nr_vm_stat_items == -1) ||
	    !symbol_exists("vm_stat"))
		goto bailout;

	/*
	 *  look for type: type = atomic_long_t []
	 */
	if (LKCD_KERNTYPES()) {
		if (!symbol_exists("vm_stat"))
			goto bailout;
		/*
		 * Just assume that vm_stat is an array; there is
		 * no symbol info in a kerntypes file.
		 */
	} else {
		if (!symbol_exists("vm_stat") ||
		    get_symbol_type("vm_stat", NULL, NULL) != TYPE_CODE_ARRAY)
			goto bailout;
		vt->nr_vm_stat_items =
			get_array_length("vm_stat", NULL, 0);
	}

	/* Have gdb print the zone_stat_item enumerators into the tmpfile. */
	open_tmpfile();
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_GET_DATATYPE;
	req->name = "zone_stat_item";
	req->flags = GNU_PRINT_ENUMERATORS;
	gdb_interface(req);
	FREEBUF(req);

	stringlen = 1;
	count = -1;

	/*
	 * Pass 1: total the enumerator-name string lengths, counting
	 * entries until the NR_VM_ZONE_STAT_ITEMS terminator.
	 */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		clean_line(buf);
		c = parse_line(buf, arglist);
		if (STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) {
			/* kerntypes could not size the array above */
			if (LKCD_KERNTYPES())
				vt->nr_vm_stat_items =
					MAX(atoi(arglist[2]), count);
			break;
		} else {
			stringlen += strlen(arglist[0]);
			count++;
		}
	}

	/* One allocation: pointer table followed by the name strings. */
	total = stringlen + vt->nr_vm_stat_items +
		(sizeof(void *) * vt->nr_vm_stat_items);
	if (!(vt->vm_stat_items = (char **)malloc(total))) {
		close_tmpfile();
		error(FATAL, "cannot malloc vm_stat_items cache\n");
	}
	BZERO(vt->vm_stat_items, total);
	start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items];

	/* Pass 2: store each name at its enumerator-value index. */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		c = parse_line(buf, arglist);
		if (enumerator_value(arglist[0], &enum_value))
			i = enum_value;
		else {
			close_tmpfile();
			goto bailout;
		}
		if (i < vt->nr_vm_stat_items) {
			vt->vm_stat_items[i] = start;
			strcpy(start, arglist[0]);
			start += strlen(arglist[0]) + 1;
		}
	}
	close_tmpfile();

	vt->flags |= VM_STAT;
	return TRUE;

bailout:
	vt->nr_vm_stat_items = -1;
	return FALSE;
}

/*
 * Either dump all vm_stat entries, or return the value of
 * the specified vm_stat item.  Use the global counter unless
 * a zone-specific address is passed.
 */
static int
dump_vm_stat(char *item, long *retval, ulong zone)
{
	char *buf;
	ulong *vp;
	ulong location;
	int i, maxlen, len;

	if (!vm_stat_init()) {
		if (!item)
			if (CRASHDEBUG(1))
				error(INFO,
				    "vm_stat not available in this kernel\n");
		return FALSE;
	}

	buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items);

	/* zone == 0 means the global vm_stat[] array */
	location = zone ? zone : symbol_value("vm_stat");

	readmem(location, KVADDR, buf,
	    sizeof(ulong) * vt->nr_vm_stat_items,
	    "vm_stat", FAULT_ON_ERROR);

	if (!item) {
		/* dump all counters, right-aligned to the longest name */
		if (!zone)
			fprintf(fp, " VM_STAT:\n");
		for (i = maxlen = 0; i < vt->nr_vm_stat_items; i++)
			if ((len = strlen(vt->vm_stat_items[i])) > maxlen)
				maxlen = len;
		vp = (ulong *)buf;
		for (i = 0; i < vt->nr_vm_stat_items; i++)
			fprintf(fp, "%s%s: %ld\n",
				space(maxlen - strlen(vt->vm_stat_items[i])),
				vt->vm_stat_items[i], vp[i]);
		return TRUE;
	}

	/* look up a single named counter */
	vp = (ulong *)buf;
	for (i = 0; i < vt->nr_vm_stat_items; i++) {
		if (STREQ(vt->vm_stat_items[i], item)) {
			*retval = vp[i];
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Dump the cumulative totals of the per_cpu__page_states
 * counters.
*/
/*
 * Dump the cumulative totals of the old per_cpu__page_states
 * counters, summed across all cpus.  Returns TRUE on success,
 * FALSE if the symbol does not exist or gdb cannot describe
 * struct page_state.
 */
int
dump_page_states(void)
{
	struct syment *sp;
	ulong addr, value;
	int i, c, fd, len, instance, members;
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	struct entry {
		char *name;
		ulong value;
	} *entry_list;
	struct stat stat;
	char *namebuf, *nameptr;

	if (!(sp = per_cpu_symbol_search("per_cpu__page_states"))) {
		if (CRASHDEBUG(1))
			/*
			 * Fix: the adjacent literals previously concatenated
			 * with no separating space.
			 */
			error(INFO, "per_cpu__page_states "
			    "not available in this kernel\n");
		return FALSE;
	}

	instance = members = len = 0;

	sprintf(buf, "ptype struct page_state");

	open_tmpfile();
	if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
		close_tmpfile();
		return FALSE;
	}

	/* size the name buffer from the tmpfile itself */
	fflush(pc->tmpfile);
	fd = fileno(pc->tmpfile);
	fstat(fd, &stat);
	namebuf = GETBUF(stat.st_size);
	nameptr = namebuf;

	/* Pass 1: count the struct page_state members. */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state") ||
		    strstr(buf, "}"))
			continue;
		members++;
	}

	entry_list = (struct entry *)
		GETBUF(sizeof(struct entry) * members);
	/* ensure the "+=" accumulation below starts from zero */
	BZERO(entry_list, sizeof(struct entry) * members);

	/* Pass 2: record each member name (last token on its line). */
	rewind(pc->tmpfile);
	i = 0;
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state") ||
		    strstr(buf, "}"))
			continue;
		strip_ending_char(strip_linefeeds(buf), ';');
		c = parse_line(buf, arglist);
		strcpy(nameptr, arglist[c-1]);
		entry_list[i].name = nameptr;
		if (strlen(nameptr) > len)
			len = strlen(nameptr);
		nameptr += strlen(nameptr)+2;
		i++;
	}
	close_tmpfile();

	open_tmpfile();

	/* Dump each cpu's page_state into a fresh tmpfile... */
	for (c = 0; c < kt->cpus; c++) {
		addr = sp->value + kt->__per_cpu_offset[c];
		dump_struct("page_state", addr, RADIX(16));
	}

	/* ...then re-parse it, summing member values across cpus. */
	i = 0;
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state")) {
			instance++;
			i = 0;
			continue;
		}
		if (strstr(buf, "}"))
			continue;
		strip_linefeeds(buf);
		extract_hex(buf, &value, ',', TRUE);
		entry_list[i].value += value;
		i++;
	}
	close_tmpfile();

	fprintf(fp, " PAGE_STATES:\n");
	for (i = 0; i < members; i++) {
		sprintf(buf, "%s", entry_list[i].name);
		fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0));
		fprintf(fp, ": %ld\n", entry_list[i].value);
	}

	FREEBUF(namebuf);
	FREEBUF(entry_list);

	return TRUE;
}

/*
 * Dump the
 * cumulative totals of the per_cpu__vm_event_state
 * counters.
 */
static int
dump_vm_event_state(void)
{
	int i, c, maxlen, len;
	struct syment *sp;
	ulong addr;
	ulong *events, *cumulative;

	if (!vm_event_state_init())
		return FALSE;

	/* one buffer: per-cpu snapshot followed by the running totals */
	events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2);
	cumulative = &events[vt->nr_vm_event_items];

	sp = per_cpu_symbol_search("per_cpu__vm_event_states");

	for (c = 0; c < kt->cpus; c++) {
		addr = sp->value + kt->__per_cpu_offset[c];
		if (CRASHDEBUG(1)) {
			fprintf(fp, "[%d]: %lx\n", c, addr);
			dump_struct("vm_event_state", addr, RADIX(16));
		}
		readmem(addr, KVADDR, events,
		    sizeof(ulong) * vt->nr_vm_event_items,
		    "vm_event_states buffer", FAULT_ON_ERROR);
		for (i = 0; i < vt->nr_vm_event_items; i++)
			cumulative[i] += events[i];
	}

	fprintf(fp, "\n VM_EVENT_STATES:\n");

	/* right-align names to the longest one */
	for (i = maxlen = 0; i < vt->nr_vm_event_items; i++)
		if ((len = strlen(vt->vm_event_items[i])) > maxlen)
			maxlen = len;

	for (i = 0; i < vt->nr_vm_event_items; i++)
		fprintf(fp, "%s%s: %lu\n",
			space(maxlen - strlen(vt->vm_event_items[i])),
			vt->vm_event_items[i], cumulative[i]);

	FREEBUF(events);

	return TRUE;
}

/*
 * One-time setup: cache the vm_event_item enumerator names in
 * vt->vm_event_items[], indexed by enumerator value, in the same
 * manner as vm_stat_init().  Sets VM_EVENT in vt->flags on success;
 * marks vt->nr_vm_event_items = -1 on failure so it is not retried.
 */
static int
vm_event_state_init(void)
{
	int i, stringlen, total;
	int c ATTRIBUTE_UNUSED;
	long count, enum_value;
	struct gnu_request *req;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	char *start;

	if (vt->flags & VM_EVENT)
		return TRUE;

	if ((vt->nr_vm_event_items == -1) ||
	    !per_cpu_symbol_search("per_cpu__vm_event_states"))
		goto bailout;

	if (!enumerator_value("NR_VM_EVENT_ITEMS", &count))
		return FALSE;

	vt->nr_vm_event_items = count;

	/* print the vm_event_item enumerators into the tmpfile */
	open_tmpfile();
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_GET_DATATYPE;
	req->name = "vm_event_item";
	req->flags = GNU_PRINT_ENUMERATORS;
	gdb_interface(req);
	FREEBUF(req);

	stringlen = 1;

	/* Pass 1: total the enumerator-name string lengths. */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		clean_line(buf);
		c = parse_line(buf, arglist);
		if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS"))
			break;
		else
			stringlen += strlen(arglist[0]);
	}

	/* one allocation: pointer table followed by the name strings */
	total = stringlen + vt->nr_vm_event_items +
		(sizeof(void *) * vt->nr_vm_event_items);
	if (!(vt->vm_event_items = (char **)malloc(total))) {
		close_tmpfile();
		error(FATAL, "cannot malloc vm_event_items cache\n");
	}
	BZERO(vt->vm_event_items, total);
	start = (char *)&vt->vm_event_items[vt->nr_vm_event_items];

	/* Pass 2: store each name at its enumerator-value index. */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		c = parse_line(buf, arglist);
		if (enumerator_value(arglist[0], &enum_value))
			i = enum_value;
		else {
			close_tmpfile();
			goto bailout;
		}
		if (i < vt->nr_vm_event_items) {
			vt->vm_event_items[i] = start;
			strcpy(start, arglist[0]);
			start += strlen(arglist[0]) + 1;
		}
	}
	close_tmpfile();

	vt->flags |= VM_EVENT;
	return TRUE;

bailout:
	vt->nr_vm_event_items = -1;
	return FALSE;
}

/*
 * Dump the per-cpu offset values that are used to
 * resolve per-cpu symbol values.
 */
static void
dump_per_cpu_offsets(void)
{
	int c;
	char buf[BUFSIZE];

	fprintf(fp, "PER-CPU OFFSET VALUES:\n");

	for (c = 0; c < kt->cpus; c++) {
		sprintf(buf, "CPU %d", c);
		fprintf(fp, "%7s: %lx", buf, kt->__per_cpu_offset[c]);
		if (hide_offline_cpu(c))
			fprintf(fp, " [OFFLINE]\n");
		else
			fprintf(fp, "\n");
	}
}

/*
 * Dump the value(s) of a page->flags bitmap.
*/
/*
 * Dump the "enum pageflags" PG_ bit definitions; if a non-zero flags
 * value is passed, show only the bits set in it (indented, after a
 * "FLAGS:" banner).  Output goes to pc->saved_fp because the enum is
 * parsed out of a tmpfile that temporarily redirects fp.
 */
void
dump_page_flags(ulonglong flags)
{
	int c ATTRIBUTE_UNUSED;
	/* NOTE(review): header_printed is initialized but never used. */
	int sz, val, found, largest, longest, header_printed;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char header[BUFSIZE];
	char *arglist[MAXARGS];
	ulonglong tmpflag;

	found = longest = largest = header_printed = 0;

	open_tmpfile();

	/*
	 * Pass 1: find the longest PG_ name and the largest bit number
	 * so the columns can be sized.
	 */
	if (dump_enumerator_list("pageflags")) {
		rewind(pc->tmpfile);
		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
			if (strstr(buf1, " = ")) {
				c = parse_line(buf1, arglist);
				if ((sz = strlen(arglist[0])) > longest)
					longest = sz;
				if (strstr(arglist[0], "PG_") &&
				    ((val = atoi(arglist[2])) > largest))
					largest = val;
			}
		}
	} else
		error(FATAL, "enum pageflags does not exist in this kernel\n");

	/* convert the largest bit number into a hex-digit field width */
	largest = (largest+1)/4 + 1;
	sprintf(header, "%s BIT VALUE\n",
		mkstring(buf1, longest, LJUST, "PAGE-FLAG"));

	rewind(pc->tmpfile);

	if (flags)
		fprintf(pc->saved_fp, "FLAGS: %llx\n", flags);

	fprintf(pc->saved_fp, "%s%s", flags ? " " : "", header);

	/* Pass 2: print each PG_ bit (or just the matching ones). */
	while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf1, " = ") && strstr(buf1, "PG_")) {
			c = parse_line(buf1, arglist);
			val = atoi(arglist[2]);
			tmpflag = 1ULL << val;
			if (!flags || (flags & tmpflag)) {
				fprintf(pc->saved_fp, "%s%s %2d %.*lx\n",
					flags ? " " : "",
					mkstring(buf2, longest, LJUST,
					arglist[0]), val,
					largest, (ulong)(1ULL << val));
				if (flags & tmpflag)
					found++;
			}
		}
	}

	if (flags && !found)
		fprintf(pc->saved_fp, " (none found)\n");

	close_tmpfile();
}

/*
 * Support for slub.c slab cache.
*/ static void kmem_cache_init_slub(void) { if (vt->flags & KMEM_CACHE_INIT) return; if (CRASHDEBUG(1) && !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) error(WARNING, "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", vt->numnodes); if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); vt->flags |= KMEM_CACHE_INIT; } static void kmem_cache_list_common(void) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { fprintf(fp, "%lx ", cache_list[i]); readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)\n"); fprintf(fp, "%s\n", buf); } FREEBUF(cache_list); } #define DUMP_KMEM_CACHE_INFO_SLUB() dump_kmem_cache_info_slub(si) static void dump_kmem_cache_info_slub(struct meminfo *si) { char b1[BUFSIZE]; char b2[BUFSIZE]; int namelen, sizelen, spacelen; fprintf(fp, "%s ", mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); namelen = strlen(si->curname); sprintf(b2, "%ld", si->objsize); sizelen = strlen(b2); spacelen = 0; if (namelen++ > 18) { spacelen = 29 - namelen - sizelen; fprintf(fp, "%s%s%ld ", si->curname, space(spacelen <= 0 ? 
1 : spacelen), si->objsize); if (spacelen > 0) spacelen = 1; sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); } else { fprintf(fp, "%-18s %8ld ", si->curname, si->objsize); sprintf(b1, "%c%dld ", '%', 9); } fprintf(fp, b1, si->inuse); fprintf(fp, "%8ld %5ld %4ldk\n", si->num_slabs * si->objects, si->num_slabs, si->slabsize/1024); } static void dump_kmem_cache_slub(struct meminfo *si) { int i; ulong name, oo; unsigned int size, objsize, objects, order, offset; char *reqname, *p1; char kbuf[BUFSIZE]; char buf[BUFSIZE]; if (INVALID_MEMBER(kmem_cache_node_nr_slabs)) { error(INFO, "option requires kmem_cache_node.nr_slabs member!\n" "(the kernel must be built with CONFIG_SLUB_DEBUG)\n"); return; } order = objects = 0; si->cache_count = get_kmem_cache_list(&si->cache_list); si->cache_buf = GETBUF(SIZE(kmem_cache)); if (VALID_MEMBER(page_objects) && OFFSET(page_objects) == OFFSET(page_inuse)) si->flags |= SLAB_BITFIELD; if (!si->reqname && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_page(si, kbuf))) { si->flags |= VERBOSE; si->slab = (ulong)si->spec_addr; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); goto bailout; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; for (i = 0; i < si->cache_count; i++) { BZERO(si->cache_buf, SIZE(kmem_cache)); if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR|RETURN_PARTIAL)) goto next_cache; name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); if (reqname) { if (!STREQ(reqname, buf)) continue; fprintf(fp, "%s", kmem_cache_hdr); } if (ignore_cache(si, buf)) { fprintf(fp, "%lx 
%-18s [IGNORED]\n", si->cache_list[i], buf); goto next_cache; } objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(si->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); si->cache = si->cache_list[i]; si->curname = buf; si->objsize = objsize; si->size = size; si->objects = objects; si->slabsize = (PAGESIZE() << order); si->inuse = si->num_slabs = 0; si->slab_offset = offset; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) goto next_cache; DUMP_KMEM_CACHE_INFO_SLUB(); if (si->flags & ADDRESS_SPECIFIED) { if (!si->slab) si->slab = vaddr_to_slab(si->spec_addr); do_slab_slub(si, VERBOSE); } else if (si->flags & VERBOSE) { do_kmem_cache_slub(si); if (!reqname && ((i+1) < si->cache_count)) fprintf(fp, "%s", kmem_cache_hdr); } next_cache: if (reqname) break; } bailout: FREEBUF(si->cache_list); FREEBUF(si->cache_buf); } /* * Emulate the total count calculation done by the * slab_objects() sysfs function in slub.c. */ static int get_kmem_cache_slub_data(long cmd, struct meminfo *si) { int i, n, node; ulong total_objects, total_slabs; ulong cpu_slab_ptr, node_ptr; ulong node_nr_partial, node_nr_slabs; int full_slabs, objects; long p; short inuse; ulong *nodes, *per_cpu; struct node_table *nt; /* * nodes[n] is not being used (for now) * per_cpu[n] is a count of cpu_slab pages per node. 
*/ nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); per_cpu = nodes + vt->numnodes; total_slabs = total_objects = 0; for (i = 0; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); if (!cpu_slab_ptr) continue; if ((node = page_to_nid(cpu_slab_ptr)) < 0) goto bailout; switch (cmd) { case GET_SLUB_OBJECTS: if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), KVADDR, &inuse, sizeof(short), "page inuse", RETURN_ON_ERROR)) return FALSE; total_objects += inuse; break; case GET_SLUB_SLABS: total_slabs++; break; } per_cpu[node]++; } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *) * nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (!node_ptr) continue; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) goto bailout; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) goto bailout; switch (cmd) { case GET_SLUB_OBJECTS: if ((p = count_partial(node_ptr, si)) < 0) return FALSE; total_objects += p; break; case GET_SLUB_SLABS: total_slabs += node_nr_partial; break; } full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; objects = si->objects; switch (cmd) { case GET_SLUB_OBJECTS: total_objects += (full_slabs * objects); break; case GET_SLUB_SLABS: total_slabs += full_slabs; break; } if (!(vt->flags & CONFIG_NUMA)) break; } switch (cmd) { case GET_SLUB_OBJECTS: si->inuse = total_objects; break; case GET_SLUB_SLABS: si->num_slabs = total_slabs; break; } FREEBUF(nodes); return TRUE; bailout: FREEBUF(nodes); return FALSE; } static void do_cpu_partial_slub(struct meminfo *si, int cpu) { ulong cpu_slab_ptr; void *partial; cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; 
readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_partial), KVADDR, &partial, sizeof(void *), "kmem_cache_cpu.partial", RETURN_ON_ERROR); fprintf(fp, "CPU %d PARTIAL:\n%s", cpu, partial ? "" : " (empty)\n"); /* * kmem_cache_cpu.partial points to the first page of per cpu partial * list. */ while (partial) { si->slab = (ulong)partial; if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); break; } if (!do_slab_slub(si, VERBOSE)) break; readmem((ulong)partial + OFFSET(page_next), KVADDR, &partial, sizeof(void *), "page.next", RETURN_ON_ERROR); } } static void do_kmem_cache_slub(struct meminfo *si) { int i, n; ulong cpu_slab_ptr, node_ptr; ulong node_nr_partial, node_nr_slabs; ulong *per_cpu; struct node_table *nt; per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); for (i = 0; i < kt->cpus; i++) { if (hide_offline_cpu(i)) { fprintf(fp, "CPU %d [OFFLINE]\n", i); continue; } cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[i]; fprintf(fp, "CPU %d KMEM_CACHE_CPU:\n %lx\n", i, cpu_slab_ptr); cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); fprintf(fp, "CPU %d SLAB:\n%s", i, cpu_slab_ptr ? 
"" : " (empty)\n"); if (cpu_slab_ptr) { if ((n = page_to_nid(cpu_slab_ptr)) >= 0) per_cpu[n]++; si->slab = cpu_slab_ptr; if (!do_slab_slub(si, VERBOSE)) continue; } if (VALID_MEMBER(kmem_cache_cpu_partial)) do_cpu_partial_slub(si, i); if (received_SIGINT()) restart(0); } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)* nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (node_ptr) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) break; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) break; } else node_nr_partial = node_nr_slabs = 0; fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 2 : 10)); fprintf(fp, "%4d %5ld %7ld %7ld\n", n, node_nr_slabs, node_nr_partial, per_cpu[n]); do_node_lists_slub(si, node_ptr, n); if (!(vt->flags & CONFIG_NUMA)) break; } fprintf(fp, "\n"); FREEBUF(per_cpu); } #define DUMP_SLAB_INFO_SLUB() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ fprintf(fp, " %s %s %4d %5d %9d %4d\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ node, objects, inuse, objects - inuse); \ } static int do_slab_slub(struct meminfo *si, int verbose) { physaddr_t paddr; ulong vaddr, objects_vaddr; ushort inuse, objects; ulong freelist, cpu_freelist, cpu_slab_ptr; int i, free_objects, cpu_slab, is_free, node; ulong p, q; if (!si->slab) { if (CRASHDEBUG(1)) error(INFO, "-S option not supported for CONFIG_SLUB\n"); return FALSE; } if (!page_to_phys(si->slab, &paddr)) { error(INFO, "%s: invalid slab address: %lx\n", si->curname, si->slab); return FALSE; } node = page_to_nid(si->slab); vaddr = 
PTOV(paddr); if (verbose) fprintf(fp, " %s", slab_hdr); if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return FALSE; if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, sizeof(void *), "page.freelist", RETURN_ON_ERROR)) return FALSE; /* * Pre-2.6.27, the object count and order were fixed in the * kmem_cache structure. Now they may change, say if a high * order slab allocation fails, so the per-slab object count * is kept in the slab. */ if (VALID_MEMBER(page_objects)) { objects_vaddr = si->slab + OFFSET(page_objects); if (si->flags & SLAB_BITFIELD) objects_vaddr += sizeof(ushort); if (!readmem(objects_vaddr, KVADDR, &objects, sizeof(ushort), "page.objects", RETURN_ON_ERROR)) return FALSE; /* * Strip page.frozen bit. */ if (si->flags & SLAB_BITFIELD) { if (__BYTE_ORDER == __LITTLE_ENDIAN) { objects <<= 1; objects >>= 1; } if (__BYTE_ORDER == __BIG_ENDIAN) objects >>= 1; } if (CRASHDEBUG(1) && (objects != si->objects)) error(NOTE, "%s: slab: %lx oo objects: %ld " "slab objects: %d\n", si->curname, si->slab, si->objects, objects); if (objects == (ushort)(-1)) { error(INFO, "%s: slab: %lx invalid page.objects: -1\n", si->curname, si->slab); return FALSE; } } else objects = (ushort)si->objects; if (!verbose) { DUMP_SLAB_INFO_SLUB(); return TRUE; } for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if (cpu_slab_ptr == si->slab) { cpu_slab = i; /* * Later slub scheme uses the per-cpu freelist * so count the free objects by hand. 
*/ if (cpu_freelist) freelist = cpu_freelist; if ((free_objects = count_free_objects(si, freelist)) < 0) return FALSE; inuse = si->objects - free_objects; break; } } DUMP_SLAB_INFO_SLUB(); fprintf(fp, " %s", free_inuse_hdr); #define PAGE_MAPPING_ANON 1 if (CRASHDEBUG(8)) { fprintf(fp, "< SLUB: free list START: >\n"); i = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) { fprintf(fp, "< SLUB: free list END: %lx (%d found) >\n", q, i); break; } fprintf(fp, " %lx\n", q); i++; } if (!q) fprintf(fp, "< SLUB: free list END (%d found) >\n", i); } for (p = vaddr; p < vaddr + objects * si->size; p += si->size) { hq_open(); is_free = FALSE; for (is_free = 0, q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q == BADADDR) { hq_close(); return FALSE; } if (q & PAGE_MAPPING_ANON) break; if (p == q) { is_free = TRUE; break; } if (!hq_enter(q)) { hq_close(); error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); return FALSE; } } hq_close(); if (si->flags & ADDRESS_SPECIFIED) { if ((si->spec_addr < p) || (si->spec_addr >= (p + si->size))) { if (!(si->flags & VERBOSE)) continue; } } fprintf(fp, " %s%lx%s", is_free ? " " : "[", p, is_free ? 
" " : "]"); if (is_free && (cpu_slab >= 0)) fprintf(fp, "(cpu %d cache)", cpu_slab); fprintf(fp, "\n"); } return TRUE; } static int count_free_objects(struct meminfo *si, ulong freelist) { int c; ulong q; hq_open(); c = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) break; if (!hq_enter(q)) { error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); break; } c++; } hq_close(); return c; } static ulong get_freepointer(struct meminfo *si, void *object) { ulong vaddr, nextfree; vaddr = (ulong)(object + si->slab_offset); if (!readmem(vaddr, KVADDR, &nextfree, sizeof(void *), "get_freepointer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: slab: %lx invalid freepointer: %lx\n", si->curname, si->slab, vaddr); return BADADDR; } return nextfree; } static void do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) { ulong next, last, list_head, flags; int first; if (!node_ptr) return; list_head = node_ptr + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node partial", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d PARTIAL:\n%s", node, next == list_head ? 
" (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = last = next - OFFSET(page_lru); if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - OFFSET(page_lru), NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); return; } } #define SLAB_STORE_USER (0x00010000UL) flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); if (INVALID_MEMBER(kmem_cache_node_full) || !(flags & SLAB_STORE_USER)) { fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); return; } list_head = node_ptr + OFFSET(kmem_cache_node_full); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node full", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d FULL:\n%s", node, next == list_head ? 
" (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = next - OFFSET(page_lru); if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid full list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next)) { error(INFO, "%s: full list slab: %lx page.lru.next: %lx\n", si->curname, si->slab, next); return; } } } static char * is_kmem_cache_addr_common(ulong vaddr, char *kbuf) { int i, cnt; ulong *cache_list; ulong name; int found; cnt = get_kmem_cache_list(&cache_list); for (i = 0, found = FALSE; i < cnt; i++) { if (cache_list[i] != vaddr) continue; if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", RETURN_ON_ERROR)) break; if (!read_string(name, kbuf, BUFSIZE-1)) sprintf(kbuf, "(unknown)"); found = TRUE; break; } FREEBUF(cache_list); return (found ? kbuf : NULL); } /* * Kernel-config-neutral page-to-node evaluator. */ static int page_to_nid(ulong page) { int i; physaddr_t paddr; struct node_table *nt; physaddr_t end_paddr; if (!page_to_phys(page, &paddr)) { error(INFO, "page_to_nid: invalid page: %lx\n", page); return -1; } for (i = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((paddr >= nt->start_paddr) && (paddr < end_paddr)) return i; } error(INFO, "page_to_nid: cannot determine node for pages: %lx\n", page); return -1; } /* * Allocate and fill the passed-in buffer with a list of * the current kmem_cache structures. 
*/
/*
 * Walk the kernel's slab_caches list into a freshly-allocated array
 * of kmem_cache addresses (caller FREEBUFs *cache_buf); returns the
 * count.
 */
static int
get_kmem_cache_list(ulong **cache_buf)
{
	int cnt;
	ulong vaddr;
	struct list_data list_data, *ld;

	get_symbol_data("slab_caches", sizeof(void *), &vaddr);

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	ld->flags |= LIST_ALLOCATE;
	ld->start = vaddr;
	ld->list_head_offset = OFFSET(kmem_cache_list);
	ld->end = symbol_value("slab_caches");
	if (CRASHDEBUG(3))
		ld->flags |= VERBOSE;

	cnt = do_list(ld);
	*cache_buf = ld->list_ptr;

	return cnt;
}

/*
 * Get the address of the head page of a compound page.
 */
static ulong
compound_head(ulong page)
{
	ulong flags, first_page, compound_head;

	first_page = page;

	if (VALID_MEMBER(page_compound_head)) {
		/* newer kernels: tail pages store head|1 in compound_head */
		if (readmem(page+OFFSET(page_compound_head), KVADDR,
		    &compound_head, sizeof(ulong), "page.compound_head",
		    RETURN_ON_ERROR)) {
			if (compound_head & 1)
				first_page = compound_head - 1;
		}
	} else if (readmem(page+OFFSET(page_flags), KVADDR, &flags,
	    sizeof(ulong), "page.flags", RETURN_ON_ERROR)) {
		/* older kernels: follow first_page when head/tail flags set */
		if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask)
			readmem(page+OFFSET(page_first_page), KVADDR,
			    &first_page, sizeof(ulong),
			    "page.first_page", RETURN_ON_ERROR);
	}

	return first_page;
}

/*
 * Sum page.inuse over a kmem_cache_node's partial list, with
 * corruption checks.  Returns the total, or -1 on a read error.
 */
long
count_partial(ulong node, struct meminfo *si)
{
	ulong list_head, next, last;
	short inuse;
	ulong total_inuse;
	ulong count = 0;

	count = 0;
	total_inuse = 0;
	list_head = node + OFFSET(kmem_cache_node_partial);
	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
	    "kmem_cache_node.partial", RETURN_ON_ERROR))
		return -1;

	hq_open();
	while (next != list_head) {
		if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse),
		    KVADDR, &inuse, sizeof(ushort),
		    "page.inuse", RETURN_ON_ERROR)) {
			hq_close();
			return -1;
		}
		last = next - OFFSET(page_lru);

		if (inuse == -1) {
			error(INFO,
			    "%s: partial list slab: %lx invalid page.inuse: -1\n",
				si->curname, last);
			break;
		}
		total_inuse += inuse;

		if (!readmem(next, KVADDR, &next, sizeof(ulong),
		    "page.lru.next", RETURN_ON_ERROR)) {
			hq_close();
			return -1;
		}
		if (!IS_KVADDR(next) ||
		    ((next != list_head) &&
		     !is_page_ptr(next - OFFSET(page_lru), NULL))) {
			error(INFO,
			    "%s: partial list slab: %lx invalid page.lru.next: %lx\n",
				si->curname, last, next);
			break;
		}
		/*
		 * Keep track of the last 1000 entries, and check
		 * whether the list has recursed back onto itself.
		 */
		if ((++count % 1000) == 0) {
			hq_close();
			hq_open();
		}
		if (!hq_enter(next)) {
			error(INFO,
			    "%s: partial list slab: %lx duplicate slab entry: %lx\n",
				si->curname, last, next);
			hq_close();
			return -1;
		}
	}
	hq_close();
	return total_inuse;
}

/*
 * If si->spec_addr is a SLUB page-struct address, copy the owning
 * cache's name into buf and return buf; otherwise return NULL.
 */
char *
is_slab_page(struct meminfo *si, char *buf)
{
	int i, cnt;
	ulong page_slab, page_flags, name;
	ulong *cache_list;
	char *retval;

	if (!(vt->flags & KMALLOC_SLUB))
		return NULL;

	if (!is_page_ptr((ulong)si->spec_addr, NULL))
		return NULL;

	if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR,
	    &page_flags, sizeof(ulong), "page.flags",
	    RETURN_ON_ERROR|QUIET))
		return NULL;

	if (!(page_flags & (1 << vt->PG_slab)))
		return NULL;

	if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR,
	    &page_slab, sizeof(ulong), "page.slab",
	    RETURN_ON_ERROR|QUIET))
		return NULL;

	/* confirm page.slab actually points at a known kmem_cache */
	retval = NULL;
	cnt = get_kmem_cache_list(&cache_list);

	for (i = 0; i < cnt; i++) {
		if (page_slab == cache_list[i]) {
			if (!readmem(cache_list[i] + OFFSET(kmem_cache_name),
			    KVADDR, &name, sizeof(char *),
			    "kmem_cache.name", QUIET|RETURN_ON_ERROR))
				goto bailout;

			if (!read_string(name, buf, BUFSIZE-1))
				goto bailout;

			retval = buf;
			break;
		}
	}

bailout:
	FREEBUF(cache_list);

	return retval;
}

/*
 * Figure out which of the kmem_cache.cpu_slab declarations
 * is used by this kernel, and return a pointer to the slab
 * page being used.  Return the kmem_cache_cpu.freelist pointer
 * if requested.
*/
/*
 * Resolve a cpu's active slab page for the cache in si->cache_buf,
 * dispatching on the kmem_cache.cpu_slab declaration style recorded
 * by kmem_cache_init_slub() (embedded struct, per-cpu array, or
 * per-cpu pointer).  Optionally returns the kmem_cache_cpu.freelist
 * value through *cpu_freelist.
 */
static ulong
get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist)
{
	ulong cpu_slab_ptr, page, freelist;

	if (cpu_freelist)
		*cpu_freelist = 0;

	switch (vt->cpu_slab_type)
	{
	case TYPE_CODE_STRUCT:
		/* cpu_slab embedded directly in kmem_cache */
		cpu_slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab) +
			OFFSET(kmem_cache_cpu_page));
		if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist))
			*cpu_freelist = ULONG(si->cache_buf +
				OFFSET(kmem_cache_cpu_slab) +
				OFFSET(kmem_cache_cpu_freelist));
		break;

	case TYPE_CODE_ARRAY:
		/* cpu_slab is an array of kmem_cache_cpu pointers */
		cpu_slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu));

		if (cpu_slab_ptr && cpu_freelist &&
		    VALID_MEMBER(kmem_cache_cpu_freelist)) {
			if (readmem(cpu_slab_ptr +
			    OFFSET(kmem_cache_cpu_freelist), KVADDR,
			    &freelist, sizeof(void *),
			    "kmem_cache_cpu.freelist", RETURN_ON_ERROR))
				*cpu_freelist = freelist;
		}

		if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) {
			if (!readmem(cpu_slab_ptr +
			    OFFSET(kmem_cache_cpu_page), KVADDR,
			    &page, sizeof(void *),
			    "kmem_cache_cpu.page", RETURN_ON_ERROR))
				cpu_slab_ptr = 0;
			else
				cpu_slab_ptr = page;
		}
		break;

	case TYPE_CODE_PTR:
		/* cpu_slab is a __percpu pointer; add the cpu offset */
		cpu_slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab)) +
			kt->__per_cpu_offset[cpu];

		if (cpu_slab_ptr && cpu_freelist &&
		    VALID_MEMBER(kmem_cache_cpu_freelist)) {
			if (readmem(cpu_slab_ptr +
			    OFFSET(kmem_cache_cpu_freelist), KVADDR,
			    &freelist, sizeof(void *),
			    "kmem_cache_cpu.freelist", RETURN_ON_ERROR))
				*cpu_freelist = freelist;
		}

		if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) {
			if (!readmem(cpu_slab_ptr +
			    OFFSET(kmem_cache_cpu_page), KVADDR,
			    &page, sizeof(void *),
			    "kmem_cache_cpu.page", RETURN_ON_ERROR))
				cpu_slab_ptr = 0;
			else
				cpu_slab_ptr = page;
		}
		break;

	default:
		cpu_slab_ptr = 0;
		error(FATAL,
		    "cannot determine location of kmem_cache.cpu_slab page\n");
	}

	return cpu_slab_ptr;
}

/*
 * In 2.6.27 kmem_cache.order and kmem_cache.objects were merged
 * into the kmem_cache.oo, a kmem_cache_order_objects structure.
 * oo_order() and oo_objects() emulate the kernel functions
 * of the same name.
 */
/* Extract the page order from the packed "oo" value (high 16 bits). */
static unsigned int oo_order(ulong oo)
{
	return (oo >> 16);
}

/* Extract the object count from the packed "oo" value (low 16 bits). */
static unsigned int oo_objects(ulong oo)
{
	return (oo & ((1 << 16) - 1));
}

#ifdef NOT_USED
/*
 * Return the kmem_cache_node address for the node owning slab_page.
 */
ulong
slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page)
{
	int node;
	ulong node_ptr;

	if (vt->flags & CONFIG_NUMA) {
		node = page_to_nid(slab_page);
		node_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_node) +
			(sizeof(void *)*node));
	} else
		node_ptr = si->cache + OFFSET(kmem_cache_local_node);

	return node_ptr;
}

/*
 * Return the address of the kmem_cache with the given name, or 0.
 *
 * NOTE(review): this NOT_USED function would not compile as-is --
 * "cache_buf" is never declared and the GETBUF result is never used
 * or freed; remove or declare it before ever enabling this code.
 */
ulong
get_kmem_cache_by_name(char *request)
{
	int i, cnt;
	ulong *cache_list;
	ulong name;
	char buf[BUFSIZE];
	ulong found;

	cnt = get_kmem_cache_list(&cache_list);
	cache_buf = GETBUF(SIZE(kmem_cache));

	found = 0;
	for (i = 0; i < cnt; i++) {
		readmem(cache_list[i] + OFFSET(kmem_cache_name),
		    KVADDR, &name, sizeof(char *),
		    "kmem_cache.name", FAULT_ON_ERROR);

		if (!read_string(name, buf, BUFSIZE-1))
			continue;

		if (STREQ(buf, request)) {
			found = cache_list[i];
			break;
		}
	}

	FREEBUF(cache_list);

	return found;
}
#endif /* NOT_USED */
crash-7.1.4/unwind_x86.h0000664000000000000000000000077512634305150013515 0ustar rootroot/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*/ crash-7.1.4/unwind.h0000664000000000000000000005305612634305150013010 0ustar rootroot/* * Copyright (C) 1999-2000 Hewlett-Packard Co * Copyright (C) 1999-2000 David Mosberger-Tang */ /* * Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 David Mosberger-Tang */ /* * unwind.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * include/asm-ia64/fpu.h (kernel-2.4.18-6.23) * include/asm-ia64/unwind.h (kernel-2.4.18-6.23) */ #ifndef _ASM_IA64_FPU_H #define _ASM_IA64_FPU_H struct ia64_fpreg { union { unsigned long bits[2]; } u; } __attribute__ ((aligned (16))); #endif /* _ASM_IA64_FPU_H */ #ifndef _ASM_IA64_UNWIND_H #define _ASM_IA64_UNWIND_H /* * A simple API for unwinding kernel stacks. This is used for * debugging and error reporting purposes. The kernel doesn't need * full-blown stack unwinding with all the bells and whitles, so there * is not much point in implementing the full IA-64 unwind API (though * it would of course be possible to implement the kernel API on top * of it). 
 */

struct task_struct;	/* forward declaration */
struct switch_stack;	/* forward declaration */

/* Symbolic indices for the application registers exposed by this API. */
enum unw_application_register {
	UNW_AR_BSP,
	UNW_AR_BSPSTORE,
	UNW_AR_PFS,
	UNW_AR_RNAT,
	UNW_AR_UNAT,
	UNW_AR_LC,
	UNW_AR_EC,
	UNW_AR_FPSR,
	UNW_AR_RSC,
	UNW_AR_CCV,
	UNW_AR_CSD,
	UNW_AR_SSD
};

/*
 * The following declarations are private to the unwind
 * implementation:
 */
struct unw_stack {
	unsigned long limit;
	unsigned long top;
};

#define UNW_FLAG_INTERRUPT_FRAME	(1UL << 0)

/*
 * No user of this module should ever access this structure directly
 * as it is subject to change.  It is declared here solely so we can
 * use automatic variables.
 */
struct unw_frame_info {
	struct unw_stack regstk;
	struct unw_stack memstk;
	unsigned int flags;		/* UNW_FLAG_* bits (see unw_is_intr_frame) */
	short hint;
	short prev_script;

	/* current frame info: */
	unsigned long bsp;		/* backing store pointer value */
	unsigned long sp;		/* stack pointer value */
	unsigned long psp;		/* previous sp value */
	unsigned long ip;		/* instruction pointer value */
	unsigned long pr;		/* current predicate values */
	unsigned long *cfm_loc;		/* cfm save location (or NULL) */
#if defined(UNWIND_V2) || defined(UNWIND_V3)
	unsigned long pt;		/* struct pt_regs location */
#endif
	struct task_struct *task;
	struct switch_stack *sw;

	/* preserved state: save locations, NULL where nothing was saved */
	unsigned long *bsp_loc;		/* previous bsp save location */
	unsigned long *bspstore_loc;
	unsigned long *pfs_loc;
	unsigned long *rnat_loc;
	unsigned long *rp_loc;
	unsigned long *pri_unat_loc;
	unsigned long *unat_loc;
	unsigned long *pr_loc;
	unsigned long *lc_loc;
	unsigned long *fpsr_loc;
	struct unw_ireg {
		unsigned long *loc;
		struct unw_ireg_nat {
			long type : 3;		/* enum unw_nat_type */
			signed long off : 61;	/* NaT word is at loc+nat.off */
		} nat;
	} r4, r5, r6, r7;
	unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
	struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
};

/*
 * The official API follows below:
 */

/*
 * Initialize unwind support.
 */
extern void unw_init (void);
extern void unw_create_gate_table (void);

/* Register/deregister an unwind table covering [table_start, table_end). */
extern void *unw_add_unwind_table (const char *name,
				   unsigned long segment_base,
				   unsigned long gp,
				   const void *table_start,
				   const void *table_end);
extern void unw_remove_unwind_table (void *handle);

/*
 * Prepare to unwind blocked task t.
 */
#ifndef REDHAT
extern void unw_init_from_blocked_task (struct unw_frame_info *info,
					struct task_struct *t);
extern void unw_init_frame_info (struct unw_frame_info *info,
				 struct task_struct *t,
				 struct switch_stack *sw);
#endif /* !REDHAT */

/*
 * Prepare to unwind the currently running thread.
 */
extern void unw_init_running (void (*callback)(struct unw_frame_info *info,
					       void *arg),
			      void *arg);

/*
 * Unwind to the previous frame.  Returns 0 if successful, negative
 * number in case of an error.
 */
#ifndef REDHAT
extern int unw_unwind (struct unw_frame_info *info);
#endif /* !REDHAT */

/*
 * Unwind until the return pointer is in user-land (or until an error
 * occurs).  Returns 0 if successful, negative number in case of
 * error.
 */
extern int unw_unwind_to_user (struct unw_frame_info *info);

/* Nonzero if the current frame was created by an interruption. */
#define unw_is_intr_frame(info)	(((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)

/*
 * Trivial accessors for frame state already cached in unw_frame_info;
 * each returns 0 on success.
 */
static inline int
unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->ip;
	return 0;
}

static inline int
unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->sp;
	return 0;
}

static inline int
unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->psp;
	return 0;
}

static inline int
unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->bsp;
	return 0;
}

static inline int
unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = *(info)->cfm_loc;
	return 0;
}

static inline int
unw_set_cfm (struct unw_frame_info *info, unsigned long val)
{
	*(info)->cfm_loc = val;
	return 0;
}

/* Returns -1 when no return-pointer save location was recorded. */
static inline int
unw_get_rp (struct unw_frame_info *info, unsigned long *val)
{
	if (!info->rp_loc)
		return -1;
	*val = *info->rp_loc;
	return 0;
}

/*
 * Map the generic unw_access_* names onto the register accessors of
 * the unwind version selected at compile time (UNWIND_V1/V2/V3).
 */
#ifdef UNWIND_V1
extern int unw_access_gr_v1 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v1 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v1 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v1 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v1 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v1
#define unw_access_br unw_access_br_v1
#define unw_access_fr unw_access_fr_v1
#define unw_access_ar unw_access_ar_v1
#define unw_access_pr unw_access_pr_v1
#endif

#ifdef UNWIND_V2
extern int unw_access_gr_v2 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v2 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v2 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v2 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v2 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v2
#define unw_access_br unw_access_br_v2
#define unw_access_fr unw_access_fr_v2
#define unw_access_ar unw_access_ar_v2
#define unw_access_pr unw_access_pr_v2
#endif

#ifdef UNWIND_V3
extern int unw_access_gr_v3 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v3 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v3 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v3 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v3 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v3
#define unw_access_br unw_access_br_v3
#define unw_access_fr unw_access_fr_v3
#define unw_access_ar unw_access_ar_v3
#define unw_access_pr unw_access_pr_v3
#endif

/*
 * Write-direction wrappers: the trailing argument of unw_access_* is
 * 1 for the unw_set_* inlines below and 0 for the unw_get_* macros.
 */
static inline int
unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
{
	return unw_access_gr(i, n, &v, &nat, 1);
}

static inline int
unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
{
	return unw_access_br(i, n, &v, 1);
}

static inline int
unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
{
	return unw_access_fr(i, n, &v, 1);
}

static inline int
unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
{
	return unw_access_ar(i, n, &v, 1);
}

static inline int
unw_set_pr (struct unw_frame_info *i, unsigned long v)
{
	return unw_access_pr(i, &v, 1);
}

#define unw_get_gr(i,n,v,nat)	unw_access_gr(i,n,v,nat,0)
#define unw_get_br(i,n,v)	unw_access_br(i,n,v,0)
#define unw_get_fr(i,n,v)	unw_access_fr(i,n,v,0)
#define unw_get_ar(i,n,v)	unw_access_ar(i,n,v,0)
#define unw_get_pr(i,v)		unw_access_pr(i,v,0)

/*
 * Kernel-version-specific layouts of the ia64 switch_stack and
 * pt_regs structures; field order mirrors the kernel ABI and must
 * not be changed.
 */
#ifdef UNWIND_V1
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f10;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f11;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};

struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b6;		/* scratch */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */
	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */
	unsigned long r14;		/* scratch */
	unsigned long r15;		/* scratch */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	/* The following registers are saved by SAVE_REST: */
	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
	unsigned long ar_fpsr;		/* floating point status (preserved) */
	unsigned long b0;		/* return pointer (bp) */
	unsigned long b7;		/* scratch */

	/*
	 * Floating point registers that the kernel considers
	 * scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
};
#endif /* UNWIND_V1 */

#ifdef UNWIND_V2
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f10;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f11;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};

struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b6;		/* scratch */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */
	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */
	unsigned long r14;		/* scratch */
	unsigned long r15;		/* scratch */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	/* The following registers are saved by SAVE_REST: */
	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
	unsigned long ar_fpsr;		/* floating point status (preserved) */
	unsigned long b0;		/* return pointer (bp) */
	unsigned long b7;		/* scratch */

	/*
	 * Floating point registers that the kernel considers
	 * scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
};
#endif /* UNWIND_V2 */

#ifdef UNWIND_V3
struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */
	unsigned long b6;		/* scratch */
	unsigned long b7;		/* scratch */

	unsigned long ar_csd;		/* used by cmp8xchg16 (scratch) */
	unsigned long ar_ssd;		/* reserved for future use (scratch) */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b0;		/* return pointer (bp) */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */

	unsigned long ar_fpsr;		/* floating point status (preserved) */
	unsigned long r15;		/* scratch */

	/* The remaining registers are NOT saved for system calls.  */
	unsigned long r14;		/* scratch */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */

	/* The following registers are saved by SAVE_REST: */
	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */

	/*
	 * Floating point registers that the kernel considers scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
	struct ia64_fpreg f10;		/* scratch */
	struct ia64_fpreg f11;		/* scratch */
};

/*
 * This structure contains the additional registers that need to be
 * preserved across a context switch.  This generally consists of
 * "preserved" registers.
 */
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};
#endif /* UNWIND_V3 */

#endif /* _ASM_UNWIND_H */
crash-7.1.4/COPYING30000664000000000000000000010451312634305150012444 0ustar  rootroot                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. 
To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . crash-7.1.4/netdump.c0000664000000000000000000037432012634305150013153 0ustar rootroot/* netdump.c * * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Author: David Anderson */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include "netdump.h" #include "sadump.h" #include "xen_dom0.h" static struct vmcore_data vmcore_data = { 0 }; static struct vmcore_data *nd = &vmcore_data; static struct proc_kcore_data proc_kcore_data = { 0 }; static struct proc_kcore_data *pkd = &proc_kcore_data; static void netdump_print(char *, ...); static size_t resize_elf_header(int, char *, char **, ulong); static void dump_Elf32_Ehdr(Elf32_Ehdr *); static void dump_Elf32_Phdr(Elf32_Phdr *, int); static size_t dump_Elf32_Nhdr(Elf32_Off offset, int); static void dump_Elf64_Ehdr(Elf64_Ehdr *); static void dump_Elf64_Phdr(Elf64_Phdr *, int); static size_t dump_Elf64_Nhdr(Elf64_Off offset, int); static void get_netdump_regs_32(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_ppc(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_arm(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_arm64(struct bt_info *, ulong *, ulong *); static void check_dumpfile_size(char *); static int proc_kcore_init_32(FILE *fp); static int proc_kcore_init_64(FILE *fp); static char *get_regs_from_note(char *, ulong *, ulong *); static void kdump_get_osrelease(void); static char *vmcoreinfo_read_string(const char *); #define ELFSTORE 1 #define ELFREAD 0 #define MIN_PAGE_SIZE (4096) /* * Architectures that have configurable page sizes, * can differ from the host machine's page size. */ #define READ_PAGESIZE_FROM_VMCOREINFO() \ (machine_type("IA64") || machine_type("PPC64") || machine_type("PPC") || machine_type("ARM64")) /* * kdump installs NT_PRSTATUS elf notes only to the cpus * that were online during dumping. Hence we call into * this function after reading the cpu map from the kernel, * to remap the NT_PRSTATUS notes only to the online cpus. 
 */
void
map_cpus_to_prstatus(void)
{
    void **nt_ptr;
    int online, i, j, nrcpus;
    size_t size;

    /* QEMU ELF memory dumps carry notes for every cpu -- nothing to remap. */
    if (pc->flags2 & QEMU_MEM_DUMP_ELF) /* notes exist for all cpus */
        return;

    /* Nothing to do if all cpus were online at dump time (or none found). */
    if (!(online = get_cpus_online()) || (online == kt->cpus))
        return;

    if (CRASHDEBUG(1))
        error(INFO,
            "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n",
            kt->cpus, online, nd->num_prstatus_notes);

    /* Save a copy of the note-pointer array, then clear the original. */
    size = NR_CPUS * sizeof(void *);
    nt_ptr = (void **)GETBUF(size);
    BCOPY(nd->nt_prstatus_percpu, nt_ptr, size);
    BZERO(nd->nt_prstatus_percpu, size);

    /*
     * Re-populate the array with the notes mapping to online cpus
     */
    nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS);

    /* The notes were stored consecutively (j); spread them back out to
     * the cpu indices (i) that the online cpu map says were present. */
    for (i = 0, j = 0; i < nrcpus; i++) {
        if (in_cpu_map(ONLINE_MAP, i))
            nd->nt_prstatus_percpu[i] = nt_ptr[j++];
    }

    FREEBUF(nt_ptr);
}

/*
 * Determine whether a file is a netdump/diskdump/kdump creation,
 * and if TRUE, initialize the vmcore_data structure.
 *
 * Returns the resized header size (non-zero) on success, TRUE for a
 * KCORE_LOCAL query that matches, or FALSE on any failure/mismatch.
 * NOTE(review): source_query appears to be a flags word containing
 * NETDUMP_LOCAL/NETDUMP_REMOTE/KDUMP_LOCAL/KCORE_LOCAL bits -- the
 * definitions live outside this view; confirm in defs.h.
 */
int
is_netdump(char *file, ulong source_query)
{
    int i, fd, swap;
    Elf32_Ehdr *elf32;
    Elf32_Phdr *load32;
    Elf64_Ehdr *elf64;
    Elf64_Phdr *load64;
    char *eheader;
    char buf[BUFSIZE];
    size_t size, len, tot;
    Elf32_Off offset32;
    Elf64_Off offset64;
    ulong format;

    /* Prefer read-write access; fall back to read-only. */
    if ((fd = open(file, O_RDWR)) < 0) {
        if ((fd = open(file, O_RDONLY)) < 0) {
            sprintf(buf, "%s: open", file);
            perror(buf);
            return FALSE;
        }
    }

    /* Read just enough of the file to inspect the ELF header. */
    size = MIN_NETDUMP_ELF_HEADER_SIZE;
    if ((eheader = (char *)malloc(size)) == NULL) {
        fprintf(stderr, "cannot malloc minimum ELF header buffer\n");
        clean_exit(1);
    }

    if (FLAT_FORMAT()) {
        if (!read_flattened_format(fd, 0, eheader, size))
            goto bailout;
    } else {
        if (read(fd, eheader, size) != size) {
            sprintf(buf, "%s: ELF header read", file);
            perror(buf);
            goto bailout;
        }
    }

    load32 = NULL;
    load64 = NULL;
    format = 0;
    /* Alias the raw buffer as both 32- and 64-bit headers; EI_CLASS
     * below decides which view is valid. */
    elf32 = (Elf32_Ehdr *)&eheader[0];
    elf64 = (Elf64_Ehdr *)&eheader[0];

    /*
     * Verify the ELF header, and determine the dumpfile format.
     *
     * For now, kdump vmcores differ from netdump/diskdump like so:
     *
     *  1. The first kdump PT_LOAD segment is packed just after
     *     the ELF header, whereas netdump/diskdump page-align
     *     the first PT_LOAD segment.
     *  2. Each kdump PT_LOAD segment has a p_align field of zero,
     *     whereas netdump/diskdump have their p_align fields set
     *     to the system page-size.
     *
     * If either kdump difference is seen, presume kdump -- this
     * is obviously subject to change.
     */
    if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
        goto bailout;

    /* Byte-swap fields if the dump's endianness differs from the host's. */
    swap = (((eheader[EI_DATA] == ELFDATA2LSB) &&
         (__BYTE_ORDER == __BIG_ENDIAN)) ||
        ((eheader[EI_DATA] == ELFDATA2MSB) &&
         (__BYTE_ORDER == __LITTLE_ENDIAN)));

    if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
        (swap16(elf32->e_type, swap) == ET_CORE) &&
        (swap32(elf32->e_version, swap) == EV_CURRENT) &&
        (swap16(elf32->e_phnum, swap) >= 2)) {
        /* 32-bit core: verify e_machine against the crash binary's target. */
        switch (swap16(elf32->e_machine, swap))
        {
        case EM_386:
            if (machine_type_mismatch(file, "X86", NULL, source_query))
                goto bailout;
            break;
        case EM_ARM:
            if (machine_type_mismatch(file, "ARM", NULL, source_query))
                goto bailout;
            break;
        case EM_PPC:
            if (machine_type_mismatch(file, "PPC", NULL, source_query))
                goto bailout;
            break;
        case EM_MIPS:
            if (machine_type_mismatch(file, "MIPS", NULL, source_query))
                goto bailout;
            break;
        default:
            if (machine_type_mismatch(file, "(unknown)", NULL, source_query))
                goto bailout;
        }

        if (endian_mismatch(file, elf32->e_ident[EI_DATA], source_query))
            goto bailout;

        /* First PT_LOAD phdr: immediately after the ehdr and the
         * (first, PT_NOTE) phdr. */
        load32 = (Elf32_Phdr *)
            &eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];

        /* Apply the kdump-vs-netdump heuristic described above. */
        if ((load32->p_offset & (MIN_PAGE_SIZE-1)) ||
            (load32->p_align == 0))
            format = KDUMP_ELF32;
        else
            format = NETDUMP_ELF32;
    } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
        (swap16(elf64->e_type, swap) == ET_CORE) &&
        (swap32(elf64->e_version, swap) == EV_CURRENT) &&
        (swap16(elf64->e_phnum, swap) >= 2)) {
        /* 64-bit core: same machine-type verification as above. */
        switch (swap16(elf64->e_machine, swap))
        {
        case EM_IA_64:
            if (machine_type_mismatch(file, "IA64", NULL, source_query))
                goto bailout;
            break;
        case EM_PPC64:
            if (machine_type_mismatch(file, "PPC64", NULL, source_query))
                goto bailout;
            break;
        case EM_X86_64:
            if (machine_type_mismatch(file, "X86_64", NULL, source_query))
                goto bailout;
            break;
        case EM_S390:
            if (machine_type_mismatch(file, "S390X", NULL, source_query))
                goto bailout;
            break;
        case EM_386:
            if (machine_type_mismatch(file, "X86", NULL, source_query))
                goto bailout;
            break;
        case EM_ARM:
            if (machine_type_mismatch(file, "ARM", NULL, source_query))
                goto bailout;
            break;
        case EM_AARCH64:
            if (machine_type_mismatch(file, "ARM64", NULL, source_query))
                goto bailout;
            break;
        case EM_MIPS:
            if (machine_type_mismatch(file, "MIPS", NULL, source_query))
                goto bailout;
            break;
        default:
            if (machine_type_mismatch(file, "(unknown)", NULL, source_query))
                goto bailout;
        }

        if (endian_mismatch(file, elf64->e_ident[EI_DATA], source_query))
            goto bailout;

        load64 = (Elf64_Phdr *)
            &eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];

        if ((load64->p_offset & (MIN_PAGE_SIZE-1)) ||
            (load64->p_align == 0))
            format = KDUMP_ELF64;
        else
            format = NETDUMP_ELF64;
    } else {
        if (CRASHDEBUG(2))
            error(INFO, "%s: not a %s ELF dumpfile\n",
                file, source_query == NETDUMP_LOCAL ?
                "netdump" : "kdump");
        goto bailout;
    }

    /* A /proc/kcore identity check needs no further setup. */
    if (source_query == KCORE_LOCAL) {
        close(fd);
        return TRUE;
    }

    /* The detected format must be one the caller asked about. */
    switch (format)
    {
    case NETDUMP_ELF32:
    case NETDUMP_ELF64:
        if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE))
            break;
        else
            goto bailout;

    case KDUMP_ELF32:
    case KDUMP_ELF64:
        if (source_query & KDUMP_LOCAL)
            break;
        else
            goto bailout;
    }

    /* Grow the header buffer to cover everything before the first
     * physical-memory segment (may realloc eheader). */
    if (!(size = resize_elf_header(fd, file, &eheader, format)))
        goto bailout;

    /* Commit to the static vmcore_data: fd, header, and format flags. */
    nd->ndfd = fd;
    nd->elf_header = eheader;
    nd->flags = format | source_query;

    switch (format)
    {
    case NETDUMP_ELF32:
    case KDUMP_ELF32:
        nd->header_size = size;
        nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0];
        /* phdr[0] is the PT_NOTE segment; the rest are PT_LOAD. */
        nd->num_pt_load_segments = nd->elf32->e_phnum - 1;
        if ((nd->pt_load_segments = (struct pt_load_segment *)
            malloc(sizeof(struct pt_load_segment) *
            nd->num_pt_load_segments)) == NULL) {
            fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n");
            clean_exit(1);
        }
        nd->notes32 = (Elf32_Phdr *)
            &nd->elf_header[sizeof(Elf32_Ehdr)];
        nd->load32 = (Elf32_Phdr *)
            &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
        /* netdump stores the page size in p_align; kdump's is zero, so
         * it must come from elsewhere (e.g. VMCOREINFO). */
        if (format == NETDUMP_ELF32)
            nd->page_size = (uint)nd->load32->p_align;
        dump_Elf32_Ehdr(nd->elf32);
        dump_Elf32_Phdr(nd->notes32, ELFREAD);
        for (i = 0; i < nd->num_pt_load_segments; i++)
            dump_Elf32_Phdr(nd->load32 + i, ELFSTORE+i);
        /* Walk and store every note in the PT_NOTE segment. */
        offset32 = nd->notes32->p_offset;
        for (tot = 0; tot < nd->notes32->p_filesz; tot += len) {
            if (!(len = dump_Elf32_Nhdr(offset32, ELFSTORE)))
                break;
            offset32 += len;
        }
        break;

    case NETDUMP_ELF64:
    case KDUMP_ELF64:
        /* Mirror of the 32-bit case above using the 64-bit structures. */
        nd->header_size = size;
        nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0];
        nd->num_pt_load_segments = nd->elf64->e_phnum - 1;
        if ((nd->pt_load_segments = (struct pt_load_segment *)
            malloc(sizeof(struct pt_load_segment) *
            nd->num_pt_load_segments)) == NULL) {
            fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n");
            clean_exit(1);
        }
        nd->notes64 = (Elf64_Phdr *)
            &nd->elf_header[sizeof(Elf64_Ehdr)];
        nd->load64 = (Elf64_Phdr *)
            &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
        if (format == NETDUMP_ELF64)
            nd->page_size = (uint)nd->load64->p_align;
        dump_Elf64_Ehdr(nd->elf64);
        dump_Elf64_Phdr(nd->notes64, ELFREAD);
        for (i = 0; i < nd->num_pt_load_segments; i++)
            dump_Elf64_Phdr(nd->load64 + i, ELFSTORE+i);
        offset64 = nd->notes64->p_offset;
        for (tot = 0; tot < nd->notes64->p_filesz; tot += len) {
            if (!(len = dump_Elf64_Nhdr(offset64, ELFSTORE)))
                break;
            offset64 += len;
        }
        break;
    }

    if (CRASHDEBUG(1))
        netdump_memory_dump(fp);

    pc->read_vmcoreinfo = vmcoreinfo_read_string;

    if ((source_query == KDUMP_LOCAL) &&
        (pc->flags2 & GET_OSRELEASE))
        kdump_get_osrelease();

    if ((source_query == KDUMP_LOCAL) &&
        (pc->flags2 & GET_LOG)) {
        pc->dfd = nd->ndfd;
        pc->readmem = read_kdump;
        nd->flags |= KDUMP_LOCAL;
        pc->flags |= KDUMP;
        get_log_from_vmcoreinfo(file);
    }

    return nd->header_size;

bailout:
    close(fd);
    free(eheader);
    return FALSE;
}

/*
 * Search through all PT_LOAD segments to determine the
 * file offset where the physical memory segment(s) start
 * in the vmcore, and consider everything prior to that as
 * header contents.
 *
 * On success, *eheader_ptr has been realloc'd to exactly that size,
 * filled from the start of the file, and the size is returned;
 * 0 is returned on any read/seek failure.  Allocation failure exits.
 */
static size_t
resize_elf_header(int fd, char *file, char **eheader_ptr, ulong format)
{
    int i;
    char buf[BUFSIZE];
    char *eheader;
    Elf32_Ehdr *elf32;
    Elf32_Phdr *load32;
    Elf64_Ehdr *elf64;
    Elf64_Phdr *load64;
    Elf32_Off p_offset32;
    Elf64_Off p_offset64;
    size_t header_size;
    uint num_pt_load_segments;

    eheader = *eheader_ptr;
    header_size = num_pt_load_segments = 0;
    elf32 = (Elf32_Ehdr *)&eheader[0];
    elf64 = (Elf64_Ehdr *)&eheader[0];

    /* Pass 1: interim size = ehdr + PT_NOTE phdr + all PT_LOAD phdrs,
     * i.e. just enough to read every program header. */
    switch (format)
    {
    case NETDUMP_ELF32:
    case KDUMP_ELF32:
        num_pt_load_segments = elf32->e_phnum - 1;
        header_size = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) +
            (sizeof(Elf32_Phdr) * num_pt_load_segments);
        break;

    case NETDUMP_ELF64:
    case KDUMP_ELF64:
        num_pt_load_segments = elf64->e_phnum - 1;
        header_size = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) +
            (sizeof(Elf64_Phdr) * num_pt_load_segments);
        break;
    }

    if ((eheader = (char *)realloc(eheader, header_size)) == NULL) {
        fprintf(stderr, "cannot realloc interim ELF header buffer\n");
        clean_exit(1);
    } else
        *eheader_ptr = eheader;

    /* Re-read the file from offset 0 into the interim-sized buffer. */
    if (FLAT_FORMAT()) {
        if (!read_flattened_format(fd, 0, eheader, header_size))
            return 0;
    } else {
        if (lseek(fd, 0, SEEK_SET) != 0) {
            sprintf(buf, "%s: lseek", file);
            perror(buf);
            return 0;
        }
        if (read(fd, eheader, header_size) != header_size) {
            sprintf(buf, "%s: ELF header read", file);
            perror(buf);
            return 0;
        }
    }

    /* Pass 2: the true header size is the smallest non-zero p_offset
     * among the PT_LOAD segments -- everything before the first memory
     * segment in the file counts as header. */
    switch (format)
    {
    case NETDUMP_ELF32:
    case KDUMP_ELF32:
        load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)];
        p_offset32 = load32->p_offset;
        for (i = 0; i < num_pt_load_segments; i++, load32 += 1) {
            if (load32->p_offset &&
                (p_offset32 > load32->p_offset))
                p_offset32 = load32->p_offset;
        }
        header_size = (size_t)p_offset32;
        break;

    case NETDUMP_ELF64:
    case KDUMP_ELF64:
        load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)];
        p_offset64 = load64->p_offset;
        for (i = 0; i < num_pt_load_segments; i++, load64 += 1) {
            if (load64->p_offset &&
                (p_offset64 > load64->p_offset))
                p_offset64 = load64->p_offset;
        }
        header_size = (size_t)p_offset64;
        break;
    }

    if ((eheader = (char *)realloc(eheader, header_size)) == NULL) {
        perror("realloc");
        fprintf(stderr, "cannot realloc resized ELF header buffer\n");
        clean_exit(1);
    } else
        *eheader_ptr = eheader;

    /* Final read: fill the full-sized header buffer from the start. */
    if (FLAT_FORMAT()) {
        if (!read_flattened_format(fd, 0, eheader, header_size))
            return 0;
    } else {
        if (lseek(fd, 0, SEEK_SET) != 0) {
            sprintf(buf, "%s: lseek", file);
            perror(buf);
            return 0;
        }
        if (read(fd, eheader, header_size) != header_size) {
            sprintf(buf, "%s: ELF header read", file);
            perror(buf);
            return 0;
        }
    }

    return header_size;
}

/*
 * Return the e_version number of an ELF file
 * (or -1 if its not readable ELF file)
 */
int
file_elf_version(char *file)
{
    int fd, size;
    Elf32_Ehdr *elf32;
    Elf64_Ehdr *elf64;
    char header[MIN_NETDUMP_ELF_HEADER_SIZE];
    char buf[BUFSIZE];

    if ((fd = open(file, O_RDONLY)) < 0) {
        sprintf(buf, "%s: open", file);
        perror(buf);
        return -1;
    }

    size = MIN_NETDUMP_ELF_HEADER_SIZE;
    if (read(fd, header, size) != size) {
        sprintf(buf, "%s: read", file);
        perror(buf);
        close(fd);
return -1; } close(fd); elf32 = (Elf32_Ehdr *)&header[0]; elf64 = (Elf64_Ehdr *)&header[0]; if (STRNEQ(elf32->e_ident, ELFMAG) && (elf32->e_ident[EI_CLASS] == ELFCLASS32) && (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && (elf32->e_ident[EI_VERSION] == EV_CURRENT)) { return (elf32->e_version); } else if (STRNEQ(elf64->e_ident, ELFMAG) && (elf64->e_ident[EI_CLASS] == ELFCLASS64) && (elf64->e_ident[EI_VERSION] == EV_CURRENT)) { return (elf64->e_version); } return -1; } /* * Check whether any PT_LOAD segment goes beyond the file size. */ static void check_dumpfile_size(char *file) { int i; struct stat64 stat; struct pt_load_segment *pls; uint64_t segment_end; if (is_ramdump_image()) return; if (stat64(file, &stat) < 0) return; if (S_ISBLK(stat.st_mode)) { error(NOTE, "%s: No dump complete check for block devices\n", file); return; } for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; segment_end = pls->file_offset + (pls->phys_end - pls->phys_start); if (segment_end > stat.st_size) { error(WARNING, "%s: may be truncated or incomplete\n" " PT_LOAD p_offset: %lld\n" " p_filesz: %lld\n" " bytes required: %lld\n" " dumpfile size: %lld\n\n", file, pls->file_offset, pls->phys_end - pls->phys_start, segment_end, stat.st_size); return; } } } /* * Perform any post-dumpfile determination stuff here. */ int netdump_init(char *unused, FILE *fptr) { if (!VMCORE_VALID()) return FALSE; nd->ofp = fptr; check_dumpfile_size(pc->dumpfile); return TRUE; } /* * Read from a netdump-created dumpfile. */ int read_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; ssize_t read_ret; struct pt_load_segment *pls; int i; offset = 0; /* * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and * p_memsz, so for now, multiple PT_LOAD segment support is * restricted to 64-bit machines for netdump/diskdump vmcores. * However, kexec/kdump has introduced the optional use of a * 64-bit ELF header for 32-bit processors. 
*/
	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
		/* single segment starting at physical 0: header then memory */
		offset = (off_t)paddr + (off_t)nd->header_size;
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF32:
	case KDUMP_ELF64:
		/*
		 *  Common case: one PT_LOAD segment; bias by its start
		 *  physical address.
		 */
		if (nd->num_pt_load_segments == 1) {
			offset = (off_t)paddr + (off_t)nd->header_size -
				(off_t)nd->pt_load_segments[0].phys_start;
			break;
		}

		/* find the PT_LOAD segment containing paddr */
		for (i = offset = 0; i < nd->num_pt_load_segments; i++) {
			pls = &nd->pt_load_segments[i];
			if ((paddr >= pls->phys_start) &&
			    (paddr < pls->phys_end)) {
				offset = (off_t)(paddr - pls->phys_start) +
					pls->file_offset;
				break;
			}
			/*
			 *  paddr falls in the p_memsz tail that was not
			 *  written to the file (p_filesz < p_memsz):
			 *  synthesize zeroes.
			 */
			if (pls->zero_fill && (paddr >= pls->phys_end) &&
			    (paddr < pls->zero_fill)) {
				memset(bufptr, 0, cnt);
				if (CRASHDEBUG(8))
					fprintf(fp, "read_netdump: zero-fill: "
					    "addr: %lx paddr: %llx cnt: %d\n",
						addr, (ulonglong)paddr, cnt);
				return cnt;
			}
		}

		/* offset 0 is the ELF header itself, never valid data */
		if (!offset) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "offset not found for paddr: %llx\n",
					(ulonglong)paddr);
			return READ_ERROR;
		}
		break;
	}

	if (CRASHDEBUG(8))
		fprintf(fp, "read_netdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n",
			addr, (ulonglong)paddr, cnt, (ulonglong)offset);

	if (FLAT_FORMAT()) {
		if (!read_flattened_format(nd->ndfd, offset, bufptr, cnt)) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "read_flattened_format failed for offset:"
				    " %llx\n",
					(ulonglong)offset);
			return READ_ERROR;
		}
	} else {
		if (lseek(nd->ndfd, offset, SEEK_SET) == -1) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: SEEK_ERROR: "
				    "offset: %llx\n", (ulonglong)offset);
			return SEEK_ERROR;
		}

		read_ret = read(nd->ndfd, bufptr, cnt);
		if (read_ret != cnt) {
			/*
			 *  If the incomplete flag has been set in the header,
			 *  first check whether zero_excluded has been set.
			 */
			if (is_incomplete_dump() && (read_ret >= 0) &&
			    (*diskdump_flags & ZERO_EXCLUDED)) {
				if (CRASHDEBUG(8))
					fprintf(fp, "read_netdump: zero-fill: "
					    "addr: %lx paddr: %llx cnt: %d\n",
						addr + read_ret,
						(ulonglong)paddr + read_ret,
						cnt - (int)read_ret);
				/* zero-pad the short tail of the read */
				bufptr += read_ret;
				bzero(bufptr, cnt - read_ret);
				return cnt;
			}
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "offset: %llx\n", (ulonglong)offset);
			return READ_ERROR;
		}
	}

	return cnt;
}

/*
 *  Write to a netdump-created dumpfile.  Note that cmd_wr() does not
 *  allow writes to dumpfiles, so you can't get here from there.
 *  But, if it would ever be helpful, here it is...
 *
 *  NOTE(review): unlike read_netdump(), the single-segment case here
 *  does NOT subtract pt_load_segments[0].phys_start, and there is no
 *  zero_fill handling -- if this path is ever enabled, verify the
 *  offset math against read_netdump().
 */
int
write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	off_t offset;
	struct pt_load_segment *pls;
	int i;

	offset = 0;

	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
		offset = (off_t)paddr + (off_t)nd->header_size;
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF32:
	case KDUMP_ELF64:
		if (nd->num_pt_load_segments == 1) {
			offset = (off_t)paddr + (off_t)nd->header_size;
			break;
		}

		for (i = offset = 0; i < nd->num_pt_load_segments; i++) {
			pls = &nd->pt_load_segments[i];
			if ((paddr >= pls->phys_start) &&
			    (paddr < pls->phys_end)) {
				offset = (off_t)(paddr - pls->phys_start) +
					pls->file_offset;
				break;
			}
		}

		if (!offset)
			return READ_ERROR;
		break;
	}

	if (lseek(nd->ndfd, offset, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(nd->ndfd, bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Set the file pointer for debug output.
 */
FILE *
set_netdump_fp(FILE *fp)
{
	if (!VMCORE_VALID())
		return NULL;

	nd->ofp = fp;
	return fp;
}

/*
 *  Generic print routine to handle integral and remote daemon output.
 */
static void
netdump_print(char *fmt, ...)
{ char buf[BUFSIZE]; va_list ap; if (!fmt || !strlen(fmt) || !VMCORE_VALID()) return; va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (nd->ofp) fprintf(nd->ofp, "%s", buf); else console(buf); } uint netdump_page_size(void) { if (!VMCORE_VALID()) return 0; return nd->page_size; } int netdump_free_memory(void) { return (VMCORE_VALID() ? 0 : 0); } int netdump_memory_used(void) { return (VMCORE_VALID() ? 0 : 0); } /* * The netdump server will eventually use the NT_TASKSTRUCT section * to pass the task address. Until such time, look at the ebp of the * user_regs_struct, which is located at the end of the NT_PRSTATUS * elf_prstatus structure, minus one integer: * * struct elf_prstatus * { * ... * elf_gregset_t pr_reg; (maps to user_regs_struct) * int pr_fpvalid; * }; * * If it's a kernel stack address who's adjusted task_struct value is * equal to one of the active set tasks, we'll presume it's legit. * */ ulong get_netdump_panic_task(void) { #ifdef DAEMON return nd->task_struct; #else int i, crashing_cpu; size_t len; char *user_regs; ulong ebp, esp, task; if (!VMCORE_VALID() || !get_active_set()) goto panic_task_undetermined; if (nd->task_struct) { if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", nd->task_struct); return nd->task_struct; } switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: case NETDUMP_ELF64: crashing_cpu = -1; break; case KDUMP_ELF32: case KDUMP_ELF64: crashing_cpu = -1; if (kernel_symbol_exists("crashing_cpu")) { get_symbol_data("crashing_cpu", sizeof(int), &i); if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) { crashing_cpu = i; if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: active_set[crashing_cpu: %d]: %lx\n", crashing_cpu, tt->active_set[crashing_cpu]); } } if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1)) goto panic_task_undetermined; break; default: crashing_cpu = -1; break; } if (nd->elf32 && (nd->elf32->e_machine == EM_386)) { Elf32_Nhdr *note32 = NULL; if 
(nd->num_prstatus_notes > 1) { if (crashing_cpu != -1) note32 = (Elf32_Nhdr *) nd->nt_prstatus_percpu[crashing_cpu]; } else note32 = (Elf32_Nhdr *)nd->nt_prstatus; if (!note32) goto panic_task_undetermined; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = ((char *)note32 + len) - SIZE(user_regs_struct) - sizeof(int); ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); check_ebp_esp: if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n", esp, ebp); if (IS_KVADDR(esp)) { task = stkptr_to_task(esp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: esp: %lx -> task: %lx\n", esp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } if (IS_KVADDR(ebp)) { task = stkptr_to_task(ebp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: ebp: %lx -> task: %lx\n", ebp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } } else if (nd->elf64) { Elf64_Nhdr *note64 = NULL; if (nd->num_prstatus_notes > 1) { if (crashing_cpu != -1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[crashing_cpu]; } else note64 = (Elf64_Nhdr *)nd->nt_prstatus; if (!note64) goto panic_task_undetermined; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); user_regs = (char *)((char *)note64 + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); if (nd->elf64->e_machine == EM_386) { ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); goto check_ebp_esp; } if (nd->elf64->e_machine == EM_PPC64) { /* * Get the GPR1 register value. 
*/ esp = *(ulong *)((char *)user_regs + 8); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp); if (IS_KVADDR(esp)) { task = stkptr_to_task(esp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: esp: %lx -> task: %lx\n", esp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } } if (nd->elf64->e_machine == EM_X86_64) { if ((crashing_cpu != -1) && (crashing_cpu <= kt->cpus)) return (tt->active_set[crashing_cpu]); } } panic_task_undetermined: if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: failed\n"); return NO_TASK; #endif } /* * Get the switch_stack address of the passed-in task. Currently only * the panicking task reports its switch-stack address. */ ulong get_netdump_switch_stack(ulong task) { #ifdef DAEMON if (nd->task_struct == task) return nd->switch_stack; return 0; #else if (!VMCORE_VALID() || !get_active_set()) return 0; if (nd->task_struct == task) return nd->switch_stack; return 0; #endif } int netdump_memory_dump(FILE *fp) { int i, others, wrap, flen; size_t len, tot; FILE *fpsave; Elf32_Off offset32; Elf32_Off offset64; struct pt_load_segment *pls; if (!VMCORE_VALID()) return FALSE; fpsave = nd->ofp; nd->ofp = fp; if (FLAT_FORMAT()) dump_flat_header(nd->ofp); netdump_print("vmcore_data: \n"); netdump_print(" flags: %lx (", nd->flags); others = 0; if (nd->flags & NETDUMP_LOCAL) netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : ""); if (nd->flags & KDUMP_LOCAL) netdump_print("%sKDUMP_LOCAL", others++ ? "|" : ""); if (nd->flags & NETDUMP_REMOTE) netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : ""); if (nd->flags & NETDUMP_ELF32) netdump_print("%sNETDUMP_ELF32", others++ ? "|" : ""); if (nd->flags & NETDUMP_ELF64) netdump_print("%sNETDUMP_ELF64", others++ ? "|" : ""); if (nd->flags & KDUMP_ELF32) netdump_print("%sKDUMP_ELF32", others++ ? "|" : ""); if (nd->flags & KDUMP_ELF64) netdump_print("%sKDUMP_ELF64", others++ ? 
"|" : ""); if (nd->flags & PARTIAL_DUMP) netdump_print("%sPARTIAL_DUMP", others++ ? "|" : ""); if (nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) netdump_print("%sQEMU_MEM_DUMP_KDUMP_BACKUP", others++ ? "|" : ""); netdump_print(") %s\n", FLAT_FORMAT() ? "[FLAT]" : ""); if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) { int dump_level; if (readmem(symbol_value("dump_level"), KVADDR, &dump_level, sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) { netdump_print(" dump_level: %d (0x%x) %s", dump_level, dump_level, dump_level > 0 ? "(" : ""); #define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ #define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ #define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ #define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ #define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ others = 0; if (dump_level & DUMP_EXCLUDE_CACHE) netdump_print("%sDUMP_EXCLUDE_CACHE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_CLEAN) netdump_print("%sDUMP_EXCLUDE_CLEAN", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_FREE) netdump_print("%sDUMP_EXCLUDE_FREE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_ANON) netdump_print("%sDUMP_EXCLUDE_ANON", others++ ? "|" : ""); if (dump_level & DUMP_SAVE_PRIVATE) netdump_print("%sDUMP_SAVE_PRIVATE", others++ ? "|" : ""); netdump_print("%s\n", dump_level > 0 ? 
")" : ""); } else netdump_print(" dump_level: (unknown)\n"); } else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level")) netdump_print(" dump_level: (undetermined)\n"); netdump_print(" ndfd: %d\n", nd->ndfd); netdump_print(" ofp: %lx\n", nd->ofp); netdump_print(" header_size: %d\n", nd->header_size); netdump_print(" num_pt_load_segments: %d\n", nd->num_pt_load_segments); for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; netdump_print(" pt_load_segment[%d]:\n", i); netdump_print(" file_offset: %lx\n", pls->file_offset); netdump_print(" phys_start: %llx\n", pls->phys_start); netdump_print(" phys_end: %llx\n", pls->phys_end); netdump_print(" zero_fill: %llx\n", pls->zero_fill); } netdump_print(" elf_header: %lx\n", nd->elf_header); netdump_print(" elf32: %lx\n", nd->elf32); netdump_print(" notes32: %lx\n", nd->notes32); netdump_print(" load32: %lx\n", nd->load32); netdump_print(" elf64: %lx\n", nd->elf64); netdump_print(" notes64: %lx\n", nd->notes64); netdump_print(" load64: %lx\n", nd->load64); netdump_print(" nt_prstatus: %lx\n", nd->nt_prstatus); netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); netdump_print(" task_struct: %lx\n", nd->task_struct); netdump_print(" page_size: %d\n", nd->page_size); netdump_print(" switch_stack: %lx\n", nd->switch_stack); dump_xen_kdump_data(fp); netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes); netdump_print(" num_qemu_notes: %d\n", nd->num_qemu_notes); netdump_print(" vmcoreinfo: %lx\n", (ulong)nd->vmcoreinfo); netdump_print(" size_vmcoreinfo: %d\n", nd->size_vmcoreinfo); netdump_print(" nt_prstatus_percpu: "); wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; if (nd->num_prstatus_notes == 1) netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]); else { for (i = 0; i < nd->num_prstatus_notes; i++) { if ((i % wrap) == 0) netdump_print("\n "); netdump_print("%.*lx ", flen, nd->nt_prstatus_percpu[i]); } } netdump_print("\n"); netdump_print(" nt_qemu_percpu: "); if (nd->num_qemu_notes == 1) netdump_print("%.*lx\n", flen, nd->nt_qemu_percpu[0]); else { for (i = 0; i < nd->num_qemu_notes; i++) { if ((i % wrap) == 0) netdump_print("\n "); netdump_print("%.*lx ", flen, nd->nt_qemu_percpu[i]); } } netdump_print("\n"); netdump_print(" backup_src_start: %llx\n", nd->backup_src_start); netdump_print(" backup_src_size: %lx\n", nd->backup_src_size); netdump_print(" backup_offset: %llx\n", nd->backup_offset); netdump_print("\n"); switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: case KDUMP_ELF32: dump_Elf32_Ehdr(nd->elf32); dump_Elf32_Phdr(nd->notes32, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf32_Phdr(nd->load32 + i, ELFREAD); offset32 = nd->notes32->p_offset; for (tot = 0; tot < nd->notes32->p_filesz; tot += len) { if (!(len = dump_Elf32_Nhdr(offset32, ELFREAD))) break; offset32 += len; } break; case NETDUMP_ELF64: case KDUMP_ELF64: dump_Elf64_Ehdr(nd->elf64); dump_Elf64_Phdr(nd->notes64, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf64_Phdr(nd->load64 + i, ELFREAD); offset64 = nd->notes64->p_offset; for (tot = 0; tot < nd->notes64->p_filesz; tot += len) { if (!(len = dump_Elf64_Nhdr(offset64, ELFREAD))) break; offset64 += len; } break; } dump_ramdump_data(); nd->ofp = fpsave; return TRUE; } /* * Dump an ELF file header. 
*/ static void dump_Elf32_Ehdr(Elf32_Ehdr *elf) { char buf[BUFSIZE]; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); netdump_print("Elf32_Ehdr:\n"); netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: netdump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: netdump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: netdump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: netdump_print("(ELFCLASSNUM)\n"); break; default: netdump_print("(?)\n"); break; } netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: netdump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: netdump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: netdump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: netdump_print("(ELFDATANUM)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) netdump_print("(EV_CURRENT)\n"); else netdump_print("(?)\n"); netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: netdump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: netdump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: netdump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: netdump_print("(ELFOSABI_STANDALONE)\n"); break; case ELFOSABI_LINUX: netdump_print("(ELFOSABI_LINUX)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); netdump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: netdump_print("(ET_NONE)\n"); break; case ET_REL: netdump_print("(ET_REL)\n"); break; case ET_EXEC: netdump_print("(ET_EXEC)\n"); break; case ET_DYN: netdump_print("(ET_DYN)\n"); break; case ET_CORE: netdump_print("(ET_CORE)\n"); break; case ET_NUM: 
netdump_print("(ET_NUM)\n"); break; case ET_LOOS: netdump_print("(ET_LOOS)\n"); break; case ET_HIOS: netdump_print("(ET_HIOS)\n"); break; case ET_LOPROC: netdump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: netdump_print("(ET_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_ARM: netdump_print("(EM_ARM)\n"); break; case EM_386: netdump_print("(EM_386)\n"); break; case EM_MIPS: netdump_print("(EM_MIPS)\n"); break; default: netdump_print("(unsupported)\n"); break; } netdump_print(" e_version: %ld ", elf->e_version); netdump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); netdump_print(" e_entry: %lx\n", elf->e_entry); netdump_print(" e_phoff: %lx\n", elf->e_phoff); netdump_print(" e_shoff: %lx\n", elf->e_shoff); netdump_print(" e_flags: %lx\n", elf->e_flags); if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32)) pc->flags2 |= INCOMPLETE_DUMP; netdump_print(" e_ehsize: %x\n", elf->e_ehsize); netdump_print(" e_phentsize: %x\n", elf->e_phentsize); netdump_print(" e_phnum: %x\n", elf->e_phnum); netdump_print(" e_shentsize: %x\n", elf->e_shentsize); netdump_print(" e_shnum: %x\n", elf->e_shnum); netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); } static void dump_Elf64_Ehdr(Elf64_Ehdr *elf) { char buf[BUFSIZE]; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); netdump_print("Elf64_Ehdr:\n"); netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: netdump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: netdump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: netdump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: netdump_print("(ELFCLASSNUM)\n"); break; default: netdump_print("(?)\n"); break; } netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case 
ELFDATANONE: netdump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: netdump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: netdump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: netdump_print("(ELFDATANUM)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) netdump_print("(EV_CURRENT)\n"); else netdump_print("(?)\n"); netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: netdump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: netdump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: netdump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: netdump_print("(ELFOSABI_STANDALONE)\n"); break; case ELFOSABI_LINUX: netdump_print("(ELFOSABI_LINUX)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); netdump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: netdump_print("(ET_NONE)\n"); break; case ET_REL: netdump_print("(ET_REL)\n"); break; case ET_EXEC: netdump_print("(ET_EXEC)\n"); break; case ET_DYN: netdump_print("(ET_DYN)\n"); break; case ET_CORE: netdump_print("(ET_CORE)\n"); break; case ET_NUM: netdump_print("(ET_NUM)\n"); break; case ET_LOOS: netdump_print("(ET_LOOS)\n"); break; case ET_HIOS: netdump_print("(ET_HIOS)\n"); break; case ET_LOPROC: netdump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: netdump_print("(ET_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_386: netdump_print("(EM_386)\n"); break; case EM_IA_64: netdump_print("(EM_IA_64)\n"); break; case EM_PPC64: netdump_print("(EM_PPC64)\n"); break; case EM_X86_64: netdump_print("(EM_X86_64)\n"); break; case EM_S390: netdump_print("(EM_S390)\n"); break; case EM_ARM: netdump_print("(EM_ARM)\n"); break; case EM_AARCH64: 
netdump_print("(EM_AARCH64)\n"); break; default: netdump_print("(unsupported)\n"); break; } netdump_print(" e_version: %ld ", elf->e_version); netdump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); netdump_print(" e_entry: %lx\n", elf->e_entry); netdump_print(" e_phoff: %lx\n", elf->e_phoff); netdump_print(" e_shoff: %lx\n", elf->e_shoff); netdump_print(" e_flags: %lx\n", elf->e_flags); if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64)) pc->flags2 |= INCOMPLETE_DUMP; netdump_print(" e_ehsize: %x\n", elf->e_ehsize); netdump_print(" e_phentsize: %x\n", elf->e_phentsize); netdump_print(" e_phnum: %x\n", elf->e_phnum); netdump_print(" e_shentsize: %x\n", elf->e_shentsize); netdump_print(" e_shnum: %x\n", elf->e_shnum); netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); } /* * Dump a program segment header */ static void dump_Elf32_Phdr(Elf32_Phdr *prog, int store_pt_load_data) { int others; struct pt_load_segment *pls; if ((char *)prog > (nd->elf_header + nd->header_size)) error(FATAL, "Elf32_Phdr pointer: %lx ELF header end: %lx\n\n", (char *)prog, nd->elf_header + nd->header_size); if (store_pt_load_data) pls = &nd->pt_load_segments[store_pt_load_data-1]; else pls = NULL; netdump_print("Elf32_Phdr:\n"); netdump_print(" p_type: %lx ", prog->p_type); switch (prog->p_type) { case PT_NULL: netdump_print("(PT_NULL)\n"); break; case PT_LOAD: netdump_print("(PT_LOAD)\n"); break; case PT_DYNAMIC: netdump_print("(PT_DYNAMIC)\n"); break; case PT_INTERP: netdump_print("(PT_INTERP)\n"); break; case PT_NOTE: netdump_print("(PT_NOTE)\n"); break; case PT_SHLIB: netdump_print("(PT_SHLIB)\n"); break; case PT_PHDR: netdump_print("(PT_PHDR)\n"); break; case PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: 
netdump_print("(?)\n"); } netdump_print(" p_offset: %ld (%lx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %lx\n", prog->p_vaddr); netdump_print(" p_paddr: %lx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? "|" : ""); netdump_print(")\n"); netdump_print(" p_align: %ld\n", prog->p_align); } static void dump_Elf64_Phdr(Elf64_Phdr *prog, int store_pt_load_data) { int others; struct pt_load_segment *pls; if (store_pt_load_data) pls = &nd->pt_load_segments[store_pt_load_data-1]; else pls = NULL; if ((char *)prog > (nd->elf_header + nd->header_size)) error(FATAL, "Elf64_Phdr pointer: %lx ELF header end: %lx\n\n", (char *)prog, nd->elf_header + nd->header_size); netdump_print("Elf64_Phdr:\n"); netdump_print(" p_type: %lx ", prog->p_type); switch (prog->p_type) { case PT_NULL: netdump_print("(PT_NULL)\n"); break; case PT_LOAD: netdump_print("(PT_LOAD)\n"); break; case PT_DYNAMIC: netdump_print("(PT_DYNAMIC)\n"); break; case PT_INTERP: netdump_print("(PT_INTERP)\n"); break; case PT_NOTE: netdump_print("(PT_NOTE)\n"); break; case PT_SHLIB: netdump_print("(PT_SHLIB)\n"); break; case PT_PHDR: netdump_print("(PT_PHDR)\n"); break; case PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: 
netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" p_offset: %lld (%llx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %llx\n", prog->p_vaddr); netdump_print(" p_paddr: %llx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %llu (%llx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %llu (%llx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? "|" : ""); netdump_print(")\n"); netdump_print(" p_align: %lld\n", prog->p_align); } /* * VMCOREINFO * * This is a ELF note intented for makedumpfile that is exported by the * kernel that crashes and presented as ELF note to the /proc/vmcore * of the panic kernel. */ #define VMCOREINFO_NOTE_NAME "VMCOREINFO" #define VMCOREINFO_NOTE_NAME_BYTES (sizeof(VMCOREINFO_NOTE_NAME)) /* * Reads a string value from VMCOREINFO. * * Returns a string (that has to be freed by the caller) that contains the * value for key or NULL if the key has not been found. 
*/ static char * vmcoreinfo_read_string(const char *key) { int i, j, end; size_t value_length; size_t key_length = strlen(key); char *vmcoreinfo = (char *)nd->vmcoreinfo; char *value = NULL; if (!nd->vmcoreinfo) return NULL; /* the '+ 1' is the equal sign */ for (i = 0; i < (nd->size_vmcoreinfo - key_length + 1); i++) { /* * We must also check if we're at the beginning of VMCOREINFO * or the separating newline is there, and of course if we * have a equal sign after the key. */ if ((strncmp(vmcoreinfo+i, key, key_length) == 0) && (i == 0 || vmcoreinfo[i-1] == '\n') && (vmcoreinfo[i+key_length] == '=')) { end = -1; /* Found -- search for the next newline. */ for (j = i + key_length + 1; j < nd->size_vmcoreinfo; j++) { if (vmcoreinfo[j] == '\n') { end = j; break; } } /* * If we didn't find an end, we assume it's the end * of VMCOREINFO data. */ if (end == -1) { /* Point after the end. */ end = nd->size_vmcoreinfo + 1; } value_length = end - (1+ i + key_length); value = calloc(value_length+1, sizeof(char)); if (value) strncpy(value, vmcoreinfo + i + key_length + 1, value_length); break; } } return value; } /* * Reads an integer value from VMCOREINFO. 
*/ static long vmcoreinfo_read_integer(const char *key, long default_value) { char *string; long retval = default_value; string = vmcoreinfo_read_string(key); if (string) { retval = atol(string); free(string); } return retval; } /* * Dump a note section header -- the actual data is defined by netdump */ static size_t dump_Elf32_Nhdr(Elf32_Off offset, int store) { int i, lf; Elf32_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulong *uptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); BZERO(buf, BUFSIZE); xen_core = vmcoreinfo = eraseinfo = qemuinfo = FALSE; ptr = (char *)note + sizeof(Elf32_Nhdr); if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf32_Nhdr pointer: %lx ELF header end: %lx\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { error(WARNING, "possibly corrupt Elf32_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? 
"\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf32_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } if (machine_type("PPC") && (nd->num_prstatus_notes > 0)) pc->flags2 |= ELF_NOTES; break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); nd->switch_stack = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); uptr = (ulong *)(ptr + note->n_namesz); if (*uptr && store) nd->flags |= PARTIAL_DUMP; break; #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uptr = (note->n_namesz == 5) ? 
(ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } break; #endif default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)(ptr + note->n_namesz + 1); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } uptr = (ulong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". 
*/ if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulong *)roundup((ulong)uptr, 4); if (store && qemuinfo) { for(i = 0; i < NR_CPUS; i++) { if (!nd->nt_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ? "\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *uptr++); } } if (!lf || (note->n_type == NT_TASKSTRUCT) || (note->n_type == NT_DISKDUMP) || xen_core) netdump_print("\n"); len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } static size_t dump_Elf64_Nhdr(Elf64_Off offset, int store) { int i, lf; Elf64_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulonglong *uptr; int *iptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); BZERO(buf, BUFSIZE); ptr = (char *)note + sizeof(Elf64_Nhdr); xen_core = vmcoreinfo = vmcoreinfo_xen = eraseinfo = qemuinfo = FALSE; if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf64_Nhdr pointer: %lx ELF header end: %lx\n\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining 
|| (notesize > remaining)) { error(WARNING, "possibly corrupt Elf64_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? "\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf64_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_FPREGSET: netdump_print("(NT_FPREGSET)\n"); break; case NT_S390_TIMER: netdump_print("(NT_S390_TIMER)\n"); break; case NT_S390_TODCMP: netdump_print("(NT_S390_TODCMP)\n"); break; case NT_S390_TODPREG: netdump_print("(NT_S390_TODPREG)\n"); break; case NT_S390_CTRS: netdump_print("(NT_S390_CTRS)\n"); break; case NT_S390_PREFIX: netdump_print("(NT_S390_PREFIX)\n"); break; case NT_S390_VXRS_LOW: netdump_print("(NT_S390_VXRS_LOW)\n"); break; case NT_S390_VXRS_HIGH: netdump_print("(NT_S390_VXRS_HIGH)\n"); break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (STRNEQ(buf, "SNAP")) pc->flags2 |= (LIVE_DUMP|SNAP); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); nd->switch_stack = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); iptr = (int *)(ptr + note->n_namesz); if (*iptr && store) nd->flags |= PARTIAL_DUMP; if (note->n_descsz < sizeof(ulonglong)) netdump_print(" %08x", *iptr); break; #ifdef NOTDEF /* * Note: Based 
upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uint32_t *u32ptr; if (nd->elf64->e_machine == EM_386) { u32ptr = (note->n_namesz == 5) ? (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : (uint *)(ptr + note->n_namesz); nd->page_size = 1 << *u32ptr; u32ptr++; nd->task_struct = *u32ptr; } else { uptr = (note->n_namesz == 5) ? (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulonglong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } } break; #endif default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)nd->elf64 + offset + (sizeof(Elf64_Nhdr) + ((note->n_namesz + 3) & ~3)); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; 
case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } if (machine_type("S390X")) { if (store) machdep->dumpfile_init(nd->num_prstatus_notes, note); uptr = (ulonglong *) ((void *)note + roundup(sizeof(*note) + note->n_namesz, 4)); } else { uptr = (ulonglong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". */ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5)) uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulonglong *)roundup((ulong)uptr, 4); } if (store && qemuinfo) { for(i=0; int_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS) || qemuinfo)) { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } iptr = (int *)uptr; for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ? 
"\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *iptr++); } } else if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else if (note->n_descsz == 4) { i = 0; lf = 1; iptr = (int *)uptr; netdump_print(" %08lx\n", *iptr); } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86_64")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_X86_64, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_X86_64, QEMU_NOTE, note, nd->ofp); } if (machine_type("PPC64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_PPC64, PRSTATUS_NOTE, note, nd->ofp); if (machine_type("ARM64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_AARCH64, PRSTATUS_NOTE, note, nd->ofp); } for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { netdump_print("%s ", i ? "\n" : ""); lf++; } else lf = 0; netdump_print("%016llx ", *uptr++); } } if (!lf) netdump_print("\n"); else if (i && (i&1)) netdump_print("\n"); len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } void * netdump_get_prstatus_percpu(int cpu) { int online; if ((cpu < 0) || (cpu >= nd->num_prstatus_notes)) return NULL; /* * If no cpu mapping was done, then there must be * a one-to-one relationship between the number * of online cpus and the number of notes. */ if ((online = get_cpus_online()) && (online == kt->cpus) && (online != nd->num_prstatus_notes)) return NULL; return nd->nt_prstatus_percpu[cpu]; } /* * Send the request to the proper architecture hander. 
*/
void
get_netdump_regs(struct bt_info *bt, ulong *eip, ulong *esp)
{
	int e_machine;

	/* Determine the dumpfile's ELF machine type (32- or 64-bit header). */
	if (nd->elf32)
		e_machine = nd->elf32->e_machine;
	else if (nd->elf64)
		e_machine = nd->elf64->e_machine;
	else
		e_machine = EM_NONE;

	/* Dispatch to the per-architecture register extractor. */
	switch (e_machine)
	{
	case EM_386:
		return get_netdump_regs_x86(bt, eip, esp);
		break;

	case EM_IA_64:
		/* For normal backtraces, this information will be obtained
		 * from the switch_stack structure, which is pointed to by
		 * the thread.ksp field of the task_struct. But it's still
		 * needed by the "bt -t" option.
		 */
		machdep->get_stack_frame(bt, eip, esp);
		break;

	case EM_PPC:
		return get_netdump_regs_ppc(bt, eip, esp);
		break;

	case EM_PPC64:
		return get_netdump_regs_ppc64(bt, eip, esp);
		break;

	case EM_X86_64:
		return get_netdump_regs_x86_64(bt, eip, esp);
		break;

	case EM_S390:
		/* No ELF-note register source; fall back to the stack frame. */
		machdep->get_stack_frame(bt, eip, esp);
		break;

	case EM_ARM:
		return get_netdump_regs_arm(bt, eip, esp);
		break;

	case EM_AARCH64:
		return get_netdump_regs_arm64(bt, eip, esp);
		break;

	case EM_MIPS:
		return get_netdump_regs_32(bt, eip, esp);
		break;

	default:
		error(FATAL,
		   "support for ELF machine type %d not available\n",
			e_machine);
	}
}

/*
 * get regs from elf note, and return the address of user_regs.
*/ static char * get_regs_from_note(char *note, ulong *ip, ulong *sp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; long offset_sp, offset_ip; if (machine_type("X86_64")) { note64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_rsp); offset_ip = OFFSET(user_regs_struct_rip); } else if (machine_type("X86")) { note32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_esp); offset_ip = OFFSET(user_regs_struct_eip); } else return NULL; user_regs = note + len - SIZE(user_regs_struct) - sizeof(long); *sp = ULONG(user_regs + offset_sp); *ip = ULONG(user_regs + offset_ip); return user_regs; } void display_regs_from_elf_notes(int cpu, FILE *ofp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; int c, skipped_count; /* * Kdump NT_PRSTATUS notes are only related to online cpus, * so offline cpus should be skipped. 
*/ if (pc->flags2 & QEMU_MEM_DUMP_ELF) skipped_count = 0; else { for (c = skipped_count = 0; c < cpu; c++) { if (check_offline_cpu(c)) skipped_count++; } } if ((cpu - skipped_count) >= nd->num_prstatus_notes) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } if (machine_type("X86_64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = ((char *)note64) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)) ); } else if 
(machine_type("X86")) { if (nd->num_prstatus_notes > 1) note32 = (Elf32_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note32 = (Elf32_Nhdr *)nd->nt_prstatus; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = ((char *)note32) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " EAX: %08x EBX: %08x ECX: %08x EDX: %08x\n" " ESP: %08x EIP: %08x ESI: %08x EDI: %08x\n" " CS: %04x DS: %04x ES: %04x FS: %04x\n" " GS: %04x SS: %04x\n" " EBP: %08x EFLAGS: %08x\n", UINT(user_regs + OFFSET(user_regs_struct_eax)), UINT(user_regs + OFFSET(user_regs_struct_ebx)), UINT(user_regs + OFFSET(user_regs_struct_ecx)), UINT(user_regs + OFFSET(user_regs_struct_edx)), UINT(user_regs + OFFSET(user_regs_struct_esp)), UINT(user_regs + OFFSET(user_regs_struct_eip)), UINT(user_regs + OFFSET(user_regs_struct_esi)), UINT(user_regs + OFFSET(user_regs_struct_edi)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ds)), USHORT(user_regs + OFFSET(user_regs_struct_es)), USHORT(user_regs + OFFSET(user_regs_struct_fs)), USHORT(user_regs + OFFSET(user_regs_struct_gs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)), UINT(user_regs + OFFSET(user_regs_struct_ebp)), UINT(user_regs + OFFSET(user_regs_struct_eflags)) ); } else if (machine_type("PPC64")) { struct ppc64_elf_prstatus *prs; struct ppc64_pt_regs *pr; if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *)nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; prs = (struct ppc64_elf_prstatus *) ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); pr = &prs->pr_reg; fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: %016lx\n" " R15: %016lx R16: %016lx R16: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" 
" R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " NIP: %016lx MSR: %016lx\n" " OGPR3: %016lx CTR: %016lx\n" " LINK: %016lx XER: %016lx\n" " CCR: %016lx MQ: %016lx\n" " TRAP: %016lx DAR: %016lx\n" " DSISR: %016lx RESULT: %016lx\n", pr->gpr[0], pr->gpr[1], pr->gpr[2], pr->gpr[3], pr->gpr[4], pr->gpr[5], pr->gpr[6], pr->gpr[7], pr->gpr[8], pr->gpr[9], pr->gpr[10], pr->gpr[11], pr->gpr[12], pr->gpr[13], pr->gpr[14], pr->gpr[15], pr->gpr[16], pr->gpr[17], pr->gpr[18], pr->gpr[19], pr->gpr[20], pr->gpr[21], pr->gpr[22], pr->gpr[23], pr->gpr[24], pr->gpr[25], pr->gpr[26], pr->gpr[27], pr->gpr[28], pr->gpr[29], pr->gpr[30], pr->gpr[31], pr->nip, pr->msr, pr->orig_gpr3, pr->ctr, pr->link, pr->xer, pr->ccr, pr->mq, pr->trap, pr->dar, pr->dsisr, pr->result); } else if (machine_type("ARM64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); fprintf(ofp, " X0: %016lx X1: %016lx X2: %016lx\n" " X3: %016lx X4: %016lx X5: %016lx\n" " X6: %016lx X7: %016lx X8: %016lx\n" " X9: %016lx X10: %016lx X11: %016lx\n" " X12: %016lx X13: %016lx X14: %016lx\n" " X15: %016lx X16: %016lx X17: %016lx\n" " X18: %016lx X19: %016lx X20: %016lx\n" " X21: %016lx X22: %016lx X23: %016lx\n" " X24: %016lx X25: %016lx X26: %016lx\n" " X27: %016lx X28: %016lx X29: %016lx\n" " LR: %016lx SP: %016lx PC: %016lx\n" " PSTATE: %08lx FPVALID: %08x\n", ULONG(user_regs + sizeof(ulong) * 0), ULONG(user_regs + sizeof(ulong) * 1), ULONG(user_regs + sizeof(ulong) * 2), ULONG(user_regs + sizeof(ulong) * 3), ULONG(user_regs + sizeof(ulong) * 4), ULONG(user_regs + sizeof(ulong) * 5), ULONG(user_regs + sizeof(ulong) * 6), ULONG(user_regs + 
sizeof(ulong) * 7), ULONG(user_regs + sizeof(ulong) * 8), ULONG(user_regs + sizeof(ulong) * 9), ULONG(user_regs + sizeof(ulong) * 10), ULONG(user_regs + sizeof(ulong) * 11), ULONG(user_regs + sizeof(ulong) * 12), ULONG(user_regs + sizeof(ulong) * 13), ULONG(user_regs + sizeof(ulong) * 14), ULONG(user_regs + sizeof(ulong) * 15), ULONG(user_regs + sizeof(ulong) * 16), ULONG(user_regs + sizeof(ulong) * 17), ULONG(user_regs + sizeof(ulong) * 18), ULONG(user_regs + sizeof(ulong) * 19), ULONG(user_regs + sizeof(ulong) * 20), ULONG(user_regs + sizeof(ulong) * 21), ULONG(user_regs + sizeof(ulong) * 22), ULONG(user_regs + sizeof(ulong) * 23), ULONG(user_regs + sizeof(ulong) * 24), ULONG(user_regs + sizeof(ulong) * 25), ULONG(user_regs + sizeof(ulong) * 26), ULONG(user_regs + sizeof(ulong) * 27), ULONG(user_regs + sizeof(ulong) * 28), ULONG(user_regs + sizeof(ulong) * 29), ULONG(user_regs + sizeof(ulong) * 30), ULONG(user_regs + sizeof(ulong) * 31), ULONG(user_regs + sizeof(ulong) * 32), ULONG(user_regs + sizeof(ulong) * 33), UINT(user_regs + sizeof(ulong) * 34)); } } void dump_registers_for_elf_dumpfiles(void) { int c; if (!(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64") || machine_type("PPC64"))) error(FATAL, "-r option not supported for this dumpfile\n"); if (NETDUMP_DUMPFILE()) { display_regs_from_elf_notes(0, fp); return; } for (c = 0; c < kt->cpus; c++) { if (check_offline_cpu(c)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", c ? "\n" : "", c); continue; } fprintf(fp, "%sCPU %d:\n", c ? 
"\n" : "", c); display_regs_from_elf_notes(c, fp); } } struct x86_64_user_regs_struct { unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; unsigned long rip,cs,eflags; unsigned long rsp,ss; unsigned long fs_base, gs_base; unsigned long ds,es,fs,gs; }; struct x86_64_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_64_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86_64(void *note_ptr, FILE *ofp) { struct x86_64_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct x86_64_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct x86_64_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_RAX: %lx fpvalid: %d\n" "%s R15: %016lx R14: %016lx\n" "%s R13: %016lx R12: %016lx\n" "%s RBP: %016lx RBX: %016lx\n" "%s R11: %016lx R10: %016lx\n" "%s R9: %016lx R8: %016lx\n" "%s RAX: %016lx RCX: %016lx\n" "%s RDX: %016lx RSI: %016lx\n" "%s RDI: %016lx RIP: %016lx\n" "%s RFLAGS: %016lx RSP: %016lx\n" "%s FS_BASE: %016lx\n" "%s GS_BASE: %016lx\n" "%s CS: %04lx SS: %04lx DS: %04lx\n" "%s ES: %04lx FS: %04lx GS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), 
pr->regs.orig_rax, pr->fpvalid, space(sp), pr->regs.r15, pr->regs.r14, space(sp), pr->regs.r13, pr->regs.r12, space(sp), pr->regs.rbp, pr->regs.rbx, space(sp), pr->regs.r11, pr->regs.r10, space(sp), pr->regs.r9, pr->regs.r8, space(sp), pr->regs.rax, pr->regs.rcx, space(sp), pr->regs.rdx, pr->regs.rsi, space(sp), pr->regs.rdi, pr->regs.rip, space(sp), pr->regs.eflags, pr->regs.rsp, space(sp), pr->regs.fs_base, space(sp), pr->regs.gs_base, space(sp), pr->regs.cs, pr->regs.ss, pr->regs.ds, space(sp), pr->regs.es, pr->regs.fs, pr->regs.gs); } struct x86_user_regs_struct { unsigned long ebx,ecx,edx,esi,edi,ebp,eax; unsigned long ds,es,fs,gs,orig_eax; unsigned long eip,cs,eflags; unsigned long esp,ss; }; struct x86_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86(void *note_ptr, FILE *ofp) { struct x86_prstatus *pr; Elf32_Nhdr *note; int sp; note = (Elf32_Nhdr *)note_ptr; pr = (struct x86_prstatus *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); pr = (struct x86_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold : %lx\n" "%spid: %d ppid: %d pgrp: %d sid: %d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_EAX: %lx fpvalid: %d\n" "%s EBX: %08lx ECX: %08lx\n" "%s EDX: %08lx ESI: %08lx\n" "%s EDI: %08lx EBP: %08lx\n" "%s EAX: %08lx EIP: %08lx\n" "%s EFLAGS: %08lx ESP: %08lx\n" "%s DS: %04lx ES: %04lx FS: %04lx\n" "%s GS: %04lx CS: %04lx SS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), pr->regs.orig_eax, pr->fpvalid, space(sp), pr->regs.ebx, pr->regs.ecx, space(sp), pr->regs.edx, pr->regs.esi, space(sp), pr->regs.edi, pr->regs.ebp, space(sp), pr->regs.eax, pr->regs.eip, space(sp), pr->regs.eflags, pr->regs.esp, space(sp), pr->regs.ds, pr->regs.es, pr->regs.fs, space(sp), pr->regs.gs, pr->regs.cs, pr->regs.ss); } static void display_qemu_x86_64(void *note_ptr, FILE *ofp) { int i, sp; Elf64_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf64_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 
25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sRAX: %016llx RBX: %016llx\n" "%sRCX: %016llx RDX: %016llx\n" "%sRSI: %016llx RDI: %016llx\n" "%sRSP: %016llx RBP: %016llx\n" "%sRIP: %016llx RFLAGS: %016llx\n" "%s R8: %016llx R9: %016llx\n" "%sR10: %016llx R11: %016llx\n" "%sR12: %016llx R13: %016llx\n" "%sR14: %016llx R15: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags, space(sp), (ulonglong)ptr->r8, (ulonglong)ptr->r9, space(sp), (ulonglong)ptr->r10, (ulonglong)ptr->r11, space(sp), (ulonglong)ptr->r12, (ulonglong)ptr->r13, space(sp), (ulonglong)ptr->r14, (ulonglong)ptr->r15); for (i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_qemu_x86(void *note_ptr, FILE *ofp) { int i, sp; Elf32_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf32_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 
25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sEAX: %016llx EBX: %016llx\n" "%sECX: %016llx EDX: %016llx\n" "%sESI: %016llx EDI: %016llx\n" "%sESP: %016llx EBP: %016llx\n" "%sEIP: %016llx EFLAGS: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags); for(i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_prstatus_ppc64(void *note_ptr, FILE *ofp) { struct ppc64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct ppc64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct ppc64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%s R0: %016lx R1: %016lx R2: %016lx\n" "%s R3: %016lx R4: %016lx R5: %016lx\n" "%s R6: %016lx R7: %016lx R8: %016lx\n" "%s R9: %016lx R10: %016lx R11: %016lx\n" "%sR12: %016lx R13: %016lx R14: %016lx\n" "%sR15: %016lx R16: %016lx R16: %016lx\n" "%sR18: %016lx R19: %016lx R20: %016lx\n" "%sR21: %016lx R22: %016lx R23: %016lx\n" "%sR24: %016lx R25: %016lx R26: %016lx\n" "%sR27: %016lx R28: %016lx R29: %016lx\n" "%sR30: %016lx R31: %016lx\n" "%s NIP: %016lx MSR: %016lx\n" "%sOGPR3: %016lx CTR: %016lx\n" "%s LINK: %016lx XER: %016lx\n" "%s CCR: %016lx MQ: %016lx\n" "%s TRAP: %016lx DAR: %016lx\n" "%sDSISR: %016lx RESULT: %016lx\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec, space(sp), pr->pr_reg.gpr[0], pr->pr_reg.gpr[1], pr->pr_reg.gpr[2], space(sp), pr->pr_reg.gpr[3], pr->pr_reg.gpr[4], pr->pr_reg.gpr[5], space(sp), pr->pr_reg.gpr[6], pr->pr_reg.gpr[7], pr->pr_reg.gpr[8], space(sp), pr->pr_reg.gpr[9], pr->pr_reg.gpr[10], pr->pr_reg.gpr[11], space(sp), pr->pr_reg.gpr[12], pr->pr_reg.gpr[13], pr->pr_reg.gpr[14], space(sp), pr->pr_reg.gpr[15], pr->pr_reg.gpr[16], pr->pr_reg.gpr[17], space(sp), pr->pr_reg.gpr[18], pr->pr_reg.gpr[19], pr->pr_reg.gpr[20], space(sp), pr->pr_reg.gpr[21], pr->pr_reg.gpr[22], pr->pr_reg.gpr[23], space(sp), pr->pr_reg.gpr[24], pr->pr_reg.gpr[25], pr->pr_reg.gpr[26], space(sp), pr->pr_reg.gpr[27], 
pr->pr_reg.gpr[28], pr->pr_reg.gpr[29], space(sp), pr->pr_reg.gpr[30], pr->pr_reg.gpr[31], space(sp), pr->pr_reg.nip, pr->pr_reg.msr, space(sp), pr->pr_reg.orig_gpr3, pr->pr_reg.ctr, space(sp), pr->pr_reg.link, pr->pr_reg.xer, space(sp), pr->pr_reg.ccr, pr->pr_reg.mq, space(sp), pr->pr_reg.trap, pr->pr_reg.dar, space(sp), pr->pr_reg.dsisr, pr->pr_reg.result); } struct arm64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct arm64_elf_prstatus { struct arm64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; /* arm64_elf_gregset_t pr_reg; -> typedef unsigned long [34] arm64_elf_gregset_t */ unsigned long pr_reg[34]; int pr_fpvalid; }; /* Note that the ARM64 elf_gregset_t includes the 31 numbered registers plus the sp, pc and pstate: typedef unsigned long [34] elf_gregset_t; struct pt_regs { union { struct user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; u64 syscallno; } */ static void display_prstatus_arm64(void *note_ptr, FILE *ofp) { struct arm64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct arm64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct arm64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec); fprintf(ofp, "%s X0: %016lx X1: %016lx X2: %016lx\n" "%s X3: %016lx X4: %016lx X5: %016lx\n" "%s X6: %016lx X7: %016lx X8: %016lx\n" "%s X9: %016lx X10: %016lx X11: %016lx\n" "%sX12: %016lx X13: %016lx X14: %016lx\n" "%sX15: %016lx X16: %016lx X17: %016lx\n" "%sX18: %016lx X19: %016lx X20: %016lx\n" "%sX21: %016lx X22: %016lx X23: %016lx\n" "%sX24: %016lx X25: %016lx X26: %016lx\n" "%sX27: %016lx X28: %016lx X29: %016lx\n" "%s LR: %016lx SP: %016lx PC: %016lx\n" "%sPSTATE: %08lx FPVALID: %08x\n", space(sp), pr->pr_reg[0], pr->pr_reg[1], pr->pr_reg[2], space(sp), pr->pr_reg[3], pr->pr_reg[4], pr->pr_reg[5], space(sp), pr->pr_reg[6], pr->pr_reg[7], pr->pr_reg[8], space(sp), pr->pr_reg[9], pr->pr_reg[10], pr->pr_reg[11], space(sp), pr->pr_reg[12], pr->pr_reg[13], pr->pr_reg[14], space(sp), pr->pr_reg[15], pr->pr_reg[16], pr->pr_reg[17], space(sp), pr->pr_reg[18], pr->pr_reg[19], pr->pr_reg[20], space(sp), pr->pr_reg[21], pr->pr_reg[22], pr->pr_reg[23], space(sp), pr->pr_reg[24], pr->pr_reg[25], pr->pr_reg[26], space(sp), pr->pr_reg[27], pr->pr_reg[28], pr->pr_reg[29], space(sp), pr->pr_reg[30], pr->pr_reg[31], pr->pr_reg[32], space(sp), pr->pr_reg[33], pr->pr_fpvalid); } void display_ELF_note(int machine, int type, void *note, FILE *ofp) { if (note == NULL) return; switch (machine) { 
case EM_386: switch (type) { case PRSTATUS_NOTE: display_prstatus_x86(note, ofp); break; case QEMU_NOTE: display_qemu_x86(note, ofp); break; } break; case EM_X86_64: switch (type) { case PRSTATUS_NOTE: display_prstatus_x86_64(note, ofp); break; case QEMU_NOTE: display_qemu_x86_64(note, ofp); break; } break; case EM_PPC64: switch (type) { case PRSTATUS_NOTE: display_prstatus_ppc64(note, ofp); break; } break; case EM_AARCH64: switch (type) { case PRSTATUS_NOTE: display_prstatus_arm64(note, ofp); break; } break; default: return; } } void get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) { Elf64_Nhdr *note; size_t len; char *user_regs; ulong regs_size, rsp_offset, rip_offset; ulong rip, rsp; if (is_task_active(bt->task)) bt->flags |= BT_DUMPFILE_SEARCH; if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && VALID_STRUCT(user_regs_struct) && ((bt->task == tt->panic_task) || (pc->flags2 & QEMU_MEM_DUMP_ELF))) || (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && (bt->flags & BT_DUMPFILE_SEARCH))) { if (nd->num_prstatus_notes > 1) note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); regs_size = VALID_STRUCT(user_regs_struct) ? SIZE(user_regs_struct) : sizeof(struct x86_64_user_regs_struct); rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? OFFSET(user_regs_struct_rsp) : offsetof(struct x86_64_user_regs_struct, rsp); rip_offset = VALID_MEMBER(user_regs_struct_rip) ? 
OFFSET(user_regs_struct_rip) : offsetof(struct x86_64_user_regs_struct, rip); user_regs = ((char *)note + len) - regs_size - sizeof(long); rsp = ULONG(user_regs + rsp_offset); rip = ULONG(user_regs + rip_offset); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); if (KDUMP_DUMPFILE()) { *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; } bt->machdep = (void *)user_regs; } } if (ELF_NOTES_VALID() && (bt->flags & BT_DUMPFILE_SEARCH) && DISKDUMP_DUMPFILE() && (note = (Elf64_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { if (!note) goto no_nt_prstatus_exists; user_regs = get_regs_from_note((char *)note, &rip, &rsp); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; bt->machdep = (void *)user_regs; } } no_nt_prstatus_exists: machdep->get_stack_frame(bt, ripp, rspp); } /* * Netdump doesn't save state of the active tasks in the TSS, so poke around * the raw stack for some reasonable hooks. */ void get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) { int i, search, panic, panic_task, altered; char *sym; ulong *up; ulong ipintr_eip, ipintr_esp, ipintr_func; ulong halt_eip, halt_esp, panic_eip, panic_esp; int check_hardirq, check_softirq; ulong stackbase, stacktop; Elf32_Nhdr *note; char *user_regs ATTRIBUTE_UNUSED; ulong ip, sp; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, eip, esp); return; } panic_task = tt->panic_task == bt->task ? TRUE : FALSE; ipintr_eip = ipintr_esp = ipintr_func = panic = altered = 0; halt_eip = halt_esp = panic_eip = panic_esp = 0; check_hardirq = check_softirq = tt->flags & IRQSTACKS ? 
TRUE : FALSE; search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) || (machdep->flags & OMIT_FRAME_PTR); stackbase = bt->stackbase; stacktop = bt->stacktop; retry: for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ sym = closest_symbol(*up); if (XEN_CORE_DUMPFILE()) { if (STREQ(sym, "xen_machine_kexec")) { *eip = *up; *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); return; } if (STREQ(sym, "crash_kexec")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } else if (STREQ(sym, "netconsole_netdump") || STREQ(sym, "netpoll_start_netdump") || STREQ(sym, "start_disk_dump") || (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) || STREQ(sym, "disk_dump")) { crash_kexec: *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "panic")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); panic_eip = *eip; panic_esp = *esp; panic = TRUE; continue; /* keep looking for die */ } if (STREQ(sym, "die")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } return; } if (STREQ(sym, "sysrq_handle_crash")) { next_sysrq: *eip = *up; *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); pc->flags |= SYSRQ; for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) goto crash_kexec; if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } if (!panic) return; } /* * Obsolete -- replaced by sysrq_handle_crash */ if (STREQ(sym, "sysrq_handle_netdump")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); pc->flags |= SYSRQ; return; } if (STREQ(sym, "crash_nmi_callback")) { *eip = *up; *esp = search ? 
bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "stop_this_cpu")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "smp_call_function_interrupt")) { if (ipintr_eip && IS_VMALLOC_ADDR(ipintr_func) && IS_KERNEL_STATIC_TEXT(*(up - 2))) continue; ipintr_eip = *up; ipintr_esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : bt->stackbase + ((char *)(up-1) - bt->stackbuf); ipintr_func = *(up - 2); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && STREQ(sym, "safe_halt")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && !halt_eip && STREQ(sym, "xen_idle")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } if (panic) { *eip = panic_eip; *esp = panic_esp; return; } if (ipintr_eip) { *eip = ipintr_eip; *esp = ipintr_esp; return; } if (halt_eip && halt_esp) { *eip = halt_eip; *esp = halt_esp; return; } bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ); if (check_hardirq && (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_HARDIRQ; check_hardirq = FALSE; altered = TRUE; goto retry; } if (check_softirq && (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->softirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_SOFTIRQ; check_softirq = FALSE; altered = TRUE; goto retry; } if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE() && (note = (Elf32_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { user_regs = get_regs_from_note((char *)note, &ip, &sp); if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { bt->flags |= BT_KERNEL_SPACE; 
*eip = ip; *esp = sp; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) { bt->flags |= BT_USER_SPACE; *eip = ip; *esp = sp; return; } } if (CRASHDEBUG(1)) error(INFO, "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task); if (altered) { bt->stackbase = stackbase; bt->stacktop = stacktop; alter_stackbuf(bt); } if (XEN_CORE_DUMPFILE() && !panic_task && is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS))) error(FATAL, "starting backtrace locations of the active (non-crashing) " "xen tasks\n cannot be determined: try -t or -T options\n"); if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE()) bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp) { Elf32_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? 
"panic" : "active", bt->task); note = (Elf32_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf32_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp) { ppc_relocate_nt_prstatus_percpu(nd->nt_prstatus_percpu, &nd->num_prstatus_notes); get_netdump_regs_32(bt, eip, esp); } static void get_netdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) { Elf64_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? "panic" : "active", bt->task); note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } int is_partial_netdump(void) { return (nd->flags & PARTIAL_DUMP ? 
TRUE : FALSE); } /* * kexec/kdump generated vmcore files are similar enough in * nature to netdump/diskdump such that most vmcore access * functionality may be borrowed from the equivalent netdump * function. If not, re-work them here. */ int is_kdump(char *file, ulong source_query) { return is_netdump(file, source_query); } int kdump_init(char *unused, FILE *fptr) { return netdump_init(unused, fptr); } ulong get_kdump_panic_task(void) { return get_netdump_panic_task(); } int read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { physaddr_t paddr_in = paddr; if ((nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) && (paddr >= nd->backup_src_start) && (paddr < nd->backup_src_start + nd->backup_src_size)) { paddr += nd->backup_offset - nd->backup_src_start; if (CRASHDEBUG(1)) error(INFO, "qemu_mem_dump: kdump backup region: %#llx => %#llx\n", paddr_in, paddr); } if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) { if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): " "P2M_FAILURE\n", (ulonglong)paddr_in); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): %llx\n", (ulonglong)paddr_in, (ulonglong)paddr); } return read_netdump(fd, bufptr, cnt, addr, paddr); } int write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return write_netdump(fd, bufptr, cnt, addr, paddr); } void get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { get_netdump_regs(bt, eip, esp); } uint kdump_page_size(void) { uint pagesz; if (!VMCORE_VALID()) return 0; if (!(pagesz = nd->page_size)) pagesz = (uint)getpagesize(); return pagesz; } int kdump_free_memory(void) { return netdump_free_memory(); } int kdump_memory_used(void) { return netdump_memory_used(); } int kdump_memory_dump(FILE *fp) { return netdump_memory_dump(fp); } struct vmcore_data * get_kdump_vmcore_data(void) { if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) return NULL; return &vmcore_data; } /* * The 
following set of functions are not used by the crash * source code, but are available to extension modules for * gathering register sets from ELF NT_PRSTATUS note sections. * * Contributed by: Sharyathi Nagesh (sharyath@in.ibm.com) */ static void *get_ppc_regs_from_elf_notes(struct task_context *); static void *get_ppc64_regs_from_elf_notes(struct task_context *); static void *get_x86_regs_from_elf_notes(struct task_context *); static void *get_x86_64_regs_from_elf_notes(struct task_context *); static void *get_arm_regs_from_elf_notes(struct task_context *); int get_netdump_arch(void) { int e_machine; if (nd->elf32) e_machine = nd->elf32->e_machine; else if (nd->elf64) e_machine = nd->elf64->e_machine; else e_machine = EM_NONE; return e_machine; } int exist_regs_in_elf_notes(struct task_context *tc) { if ((tc->task == tt->panic_task) || (is_task_active(tc->task) && (nd->num_prstatus_notes > 1) && (tc->processor < nd->num_prstatus_notes))) return TRUE; else return FALSE; } void * get_regs_from_elf_notes(struct task_context *tc) { int e_machine = get_netdump_arch(); switch (e_machine) { case EM_386: case EM_PPC: case EM_PPC64: case EM_X86_64: case EM_ARM: break; case EM_AARCH64: error(FATAL, "get_regs_from_elf_notes: ARM64 support TBD\n"); default: error(FATAL, "support for ELF machine type %d not available\n", e_machine); } if (!exist_regs_in_elf_notes(tc)) error(FATAL, "cannot determine register set " "for active task: %lx comm: \"%s\"\n", tc->task, tc->comm); switch(e_machine) { case EM_386: return get_x86_regs_from_elf_notes(tc); case EM_PPC: return get_ppc_regs_from_elf_notes(tc); case EM_PPC64: return get_ppc64_regs_from_elf_notes(tc); case EM_X86_64: return get_x86_64_regs_from_elf_notes(tc); case EM_ARM: return get_arm_regs_from_elf_notes(tc); case EM_AARCH64: break; /* TBD */ } return NULL; } static void * get_x86_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note_32; Elf64_Nhdr *note_64; void *note; size_t len; void *pt_regs; len = 0; pt_regs = 
NULL; if (nd->num_prstatus_notes > 1) note = (void *)nd->nt_prstatus_percpu[tc->processor]; else note = (void *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; if (nd->elf32) { note_32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note_32->n_namesz, 4); } else if (nd->elf64) { note_64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note_64->n_namesz, 4); } pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); /* NEED TO BE FIXED: Hack to get the proper alignment */ pt_regs +=4; no_nt_prstatus_exists: return pt_regs; } static void * get_x86_64_regs_from_elf_notes(struct task_context *tc) { Elf64_Nhdr *note; size_t len; void *pt_regs; pt_regs = NULL; if (nd->num_prstatus_notes > 1) note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } static void * get_ppc_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note; size_t len; void *pt_regs; extern struct vmcore_data *nd; pt_regs = NULL; /* * Registers are always saved during the dump process for the * panic task. Kdump also captures registers for all CPUs if * they responded to an IPI. 
*/ if (nd->num_prstatus_notes > 1) { note = (Elf32_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; } else note = (Elf32_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } static void * get_ppc64_regs_from_elf_notes(struct task_context *tc) { Elf64_Nhdr *note; size_t len; void *pt_regs; extern struct vmcore_data *nd; pt_regs = NULL; /* * Registers are always saved during the dump process for the * panic task. Kdump also captures registers for all CPUs if * they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; } else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } /* * In case of ARM we need to determine correct PHYS_OFFSET from the kdump file. * This is done by taking lowest physical address (LMA) from given load * segments. Normally this is the right one. * * Alternative would be to store phys_base in VMCOREINFO but current kernel * kdump doesn't do that yet. 
*/ int arm_kdump_phys_base(ulong *phys_base) { struct pt_load_segment *pls; ulong paddr = ULONG_MAX; int i; for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; if (pls->phys_start < paddr) paddr = pls->phys_start; } if (paddr != ULONG_MAX) { *phys_base = paddr; return TRUE; } return FALSE; } static void * get_arm_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note_32; Elf64_Nhdr *note_64; void *note; size_t len; void *pt_regs; len = 0; pt_regs = NULL; if (nd->num_prstatus_notes > 1) note = (void *)nd->nt_prstatus_percpu[tc->processor]; else note = (void *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; if (nd->elf32) { note_32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note_32->n_namesz, 4); } else if (nd->elf64) { note_64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note_64->n_namesz, 4); } pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } /* * Read from /proc/kcore. */ int read_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int i; size_t readcnt; ulong kvaddr; Elf32_Phdr *lp32; Elf64_Phdr *lp64; off_t offset; if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } /* * Turn the physical address into a unity-mapped kernel * virtual address, which should work for 64-bit architectures, * and for lowmem access for 32-bit architectures. 
*/ offset = UNINITIALIZED; if (machine_type("ARM64")) kvaddr = PTOV((ulong)paddr); else kvaddr = (ulong)paddr | machdep->kvbase; readcnt = cnt; switch (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) { case KCORE_ELF32: for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((kvaddr >= lp32->p_vaddr) && (kvaddr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(kvaddr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } /* * If it's not accessible via unity-mapping, check whether * it's a request for a vmalloc address that can be found * in the header. */ if (pc->curcmd_flags & MEMTYPE_KVADDR) pc->curcmd_flags &= ~MEMTYPE_KVADDR; else break; for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((addr >= lp32->p_vaddr) && (addr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(addr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } break; case KCORE_ELF64: for (i = 0; i < pkd->segments; i++) { lp64 = pkd->load64 + i; if ((kvaddr >= lp64->p_vaddr) && (kvaddr < (lp64->p_vaddr + lp64->p_memsz))) { offset = (off_t)(kvaddr - lp64->p_vaddr) + (off_t)lp64->p_offset; break; } } break; } if (offset == UNINITIALIZED) return SEEK_ERROR; if (lseek(fd, offset, SEEK_SET) != offset) perror("lseek"); if (read(fd, bufptr, readcnt) != readcnt) return READ_ERROR; return cnt; } /* * place holder -- cannot write to /proc/kcore */ int write_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { error(FATAL, "cannot write to /proc/kcore\n"); return FALSE; } int is_proc_kcore(char *file, ulong source_query) { if (STREQ(file, "/proc/kcore") || same_file(file, "/proc/kcore")) { if (!is_netdump(file, source_query)) error(FATAL, "cannot translate the ELF header of /proc/kcore\n"); pkd->flags |= KCORE_LOCAL; return TRUE; } else return FALSE; } int proc_kcore_init(FILE *fp) { if (BITS32()) return proc_kcore_init_32(fp); else return proc_kcore_init_64(fp); } static int proc_kcore_init_32(FILE *fp) { Elf32_Ehdr *elf32; Elf32_Phdr *load32; char 
eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t size; size = MAX_KCORE_ELF_HEADER_SIZE; if (read(pc->mfd, eheader, size) != size) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(pc->mfd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } elf32 = (Elf32_Ehdr *)&eheader[0]; load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; pkd->segments = elf32->e_phnum - 1; size = (ulong)(load32+(elf32->e_phnum)) - (ulong)elf32; if ((pkd->elf_header = (char *)malloc(size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], size); pkd->elf32 = (Elf32_Ehdr *)pkd->elf_header; pkd->load32 = (Elf32_Phdr *) &pkd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; pkd->flags |= KCORE_ELF32; if (CRASHDEBUG(1)) kcore_memory_dump(fp); return TRUE; bailout: return FALSE; } static int proc_kcore_init_64(FILE *fp) { Elf64_Ehdr *elf64; Elf64_Phdr *load64; char eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t size; size = MAX_KCORE_ELF_HEADER_SIZE; if (read(pc->mfd, eheader, size) != size) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(pc->mfd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } elf64 = (Elf64_Ehdr *)&eheader[0]; load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; pkd->segments = elf64->e_phnum - 1; size = (ulong)(load64+(elf64->e_phnum)) - (ulong)elf64; if ((pkd->elf_header = (char *)malloc(size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], size); pkd->elf64 = (Elf64_Ehdr *)pkd->elf_header; pkd->load64 = (Elf64_Phdr *) &pkd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; pkd->flags |= KCORE_ELF64; if (CRASHDEBUG(1)) kcore_memory_dump(fp); return TRUE; bailout: return FALSE; } int kcore_memory_dump(FILE *ofp) { int i, 
others; Elf32_Phdr *lp32; Elf64_Phdr *lp64; if (!(pkd->flags & KCORE_LOCAL)) return FALSE; fprintf(ofp, "proc_kcore_data:\n"); fprintf(ofp, " flags: %lx (", nd->flags); others = 0; if (pkd->flags & KCORE_LOCAL) fprintf(ofp, "%sKCORE_LOCAL", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF32) fprintf(ofp, "%sKCORE_ELF32", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF64) fprintf(ofp, "%sKCORE_ELF64", others++ ? "|" : ""); fprintf(ofp, ")\n"); fprintf(ofp, " segments: %d\n", pkd->segments); fprintf(ofp, " elf_header: %lx\n", (ulong)pkd->elf_header); fprintf(ofp, " elf64: %lx\n", (ulong)pkd->elf64); fprintf(ofp, " load64: %lx\n", (ulong)pkd->load64); fprintf(ofp, " elf32: %lx\n", (ulong)pkd->elf32); fprintf(ofp, " load32: %lx\n\n", (ulong)pkd->load32); for (i = 0; i < pkd->segments; i++) { if (pkd->flags & KCORE_ELF32) break; lp64 = pkd->load64 + i; fprintf(ofp, " Elf64_Phdr:\n"); fprintf(ofp, " p_type: %x\n", lp64->p_type); fprintf(ofp, " p_flags: %x\n", lp64->p_flags); fprintf(ofp, " p_offset: %llx\n", (ulonglong)lp64->p_offset); fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)lp64->p_vaddr); fprintf(ofp, " p_paddr: %llx\n", (ulonglong)lp64->p_paddr); fprintf(ofp, " p_filesz: %llx\n", (ulonglong)lp64->p_filesz); fprintf(ofp, " p_memsz: %llx\n", (ulonglong)lp64->p_memsz); fprintf(ofp, " p_align: %lld\n", (ulonglong)lp64->p_align); fprintf(ofp, "\n"); } for (i = 0; i < pkd->segments; i++) { if (pkd->flags & KCORE_ELF64) break; lp32 = pkd->load32 + i; fprintf(ofp, " Elf32_Phdr:\n"); fprintf(ofp, " p_type: %x\n", lp32->p_type); fprintf(ofp, " p_flags: %x\n", lp32->p_flags); fprintf(ofp, " p_offset: %x\n", lp32->p_offset); fprintf(ofp, " p_vaddr: %x\n", lp32->p_vaddr); fprintf(ofp, " p_paddr: %x\n", lp32->p_paddr); fprintf(ofp, " p_filesz: %x\n", lp32->p_filesz); fprintf(ofp, " p_memsz: %x\n", lp32->p_memsz); fprintf(ofp, " p_align: %d\n", lp32->p_align); fprintf(ofp, "\n"); } return TRUE; } static void kdump_get_osrelease(void) { char *string; if ((string = 
vmcoreinfo_read_string("OSRELEASE"))) { fprintf(fp, "%s\n", string); free(string); } else pc->flags2 &= ~GET_OSRELEASE; } void dump_registers_for_qemu_mem_dump(void) { int i; QEMUCPUState *ptr; FILE *fpsave; fpsave = nd->ofp; nd->ofp = fp; for (i = 0; i < nd->num_qemu_notes; i++) { ptr = (QEMUCPUState *)nd->nt_qemu_percpu[i]; if (i) netdump_print("\n"); if (hide_offline_cpu(i)) { netdump_print("CPU %d: [OFFLINE]\n", i); continue; } else netdump_print("CPU %d:\n", i); if (CRASHDEBUG(1)) netdump_print(" version:%d size:%d\n", ptr->version, ptr->size); netdump_print(" RAX: %016llx RBX: %016llx RCX: %016llx\n", ptr->rax, ptr->rbx, ptr->rcx); netdump_print(" RDX: %016llx RSI: %016llx RDI:%016llx\n", ptr->rdx, ptr->rsi, ptr->rdi); netdump_print(" RSP: %016llx RBP: %016llx ", ptr->rsp, ptr->rbp); if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) { netdump_print(" R8: %016llx\n", ptr->r8); netdump_print(" R9: %016llx R10: %016llx R11: %016llx\n", ptr->r9, ptr->r10, ptr->r11); netdump_print(" R12: %016llx R13: %016llx R14: %016llx\n", ptr->r12, ptr->r13, ptr->r14); netdump_print(" R15: %016llx", ptr->r15); } else netdump_print("\n"); netdump_print(" RIP: %016llx RFLAGS: %08llx\n", ptr->rip, ptr->rflags); netdump_print(" CS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->cs.selector, ptr->cs.limit, ptr->cs.flags, ptr->cs.pad, ptr->cs.base); netdump_print(" DS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ds.selector, ptr->ds.limit, ptr->ds.flags, ptr->ds.pad, ptr->ds.base); netdump_print(" ES: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->es.selector, ptr->es.limit, ptr->es.flags, ptr->es.pad, ptr->es.base); netdump_print(" FS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->fs.selector, ptr->fs.limit, ptr->fs.flags, ptr->fs.pad, ptr->fs.base); netdump_print(" GS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", 
ptr->gs.selector, ptr->gs.limit, ptr->gs.flags, ptr->gs.pad, ptr->gs.base); netdump_print(" SS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ss.selector, ptr->ss.limit, ptr->ss.flags, ptr->ss.pad, ptr->ss.base); netdump_print(" LDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ldt.selector, ptr->ldt.limit, ptr->ldt.flags, ptr->ldt.pad, ptr->ldt.base); netdump_print(" TR: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->tr.selector, ptr->tr.limit, ptr->tr.flags, ptr->tr.pad, ptr->tr.base); netdump_print(" GDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->gdt.selector, ptr->gdt.limit, ptr->gdt.flags, ptr->gdt.pad, ptr->gdt.base); netdump_print(" IDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->idt.selector, ptr->idt.limit, ptr->idt.flags, ptr->idt.pad, ptr->idt.base); netdump_print(" CR0: %016llx CR1: %016llx CR2: %016llx\n", ptr->cr[0], ptr->cr[1], ptr->cr[2]); netdump_print(" CR3: %016llx CR4: %016llx\n", ptr->cr[3], ptr->cr[4]); } nd->ofp = fpsave; } /* * kdump saves the first 640kB physical memory for BIOS to use the * range on boot of 2nd kernel. Read request to the 640k should be * translated to the back up region. This function searches kexec * resources for the backup region. 
*/
/*
 * Locate the kexec "backup region" (the saved copy of the first 640kB of
 * physical memory) for dumpfile formats that do not record it themselves
 * (sadump and qemu ELF memory dumps), by walking the kexec_crash_image
 * segments in the dump, finding the ELF core header that kexec loaded,
 * and scanning its program headers.  On success the backup region's
 * source start/size and file offset are stored in the sadump or vmcore
 * descriptor so later reads of the 640kB range are redirected.
 */
void
kdump_backup_region_init(void)
{
	char buf[BUFSIZE];
	ulong i, total, kexec_crash_image_p, elfcorehdr_p;
	Elf32_Off e_phoff32;
	Elf64_Off e_phoff64;
	uint16_t e_phnum, e_phentsize;
	ulonglong backup_offset;
	ulonglong backup_src_start;
	ulong backup_src_size;
	int kimage_segment_len;
	size_t bufsize;
	struct vmcore_data *vd;
	struct sadump_data *sd;
	int is_32_bit;
	char typename[BUFSIZE];

	e_phoff32 = e_phoff64 = 0;
	vd = NULL;
	sd = NULL;

	/* Only sadump and qemu ELF dumps need this translation; all other
	 * dumpfile types return immediately. */
	if (SADUMP_DUMPFILE()) {
		sd = get_sadump_data();
		is_32_bit = FALSE;
		sprintf(typename, "sadump");
	} else if (pc->flags2 & QEMU_MEM_DUMP_ELF) {
		vd = get_kdump_vmcore_data();
		if (vd->flags & KDUMP_ELF32)
			is_32_bit = TRUE;
		else
			is_32_bit = FALSE;
		sprintf(typename, "qemu mem dump");
	} else
		return;

	if (!readmem(symbol_value("kexec_crash_image"), KVADDR,
	    &kexec_crash_image_p, sizeof(ulong),
	    "kexec backup region: kexec_crash_image",
	    QUIET|RETURN_ON_ERROR))
		goto error;

	/* A NULL kexec_crash_image simply means no crash kernel was loaded. */
	if (!kexec_crash_image_p) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "%s: kexec_crash_image not loaded\n", typename);
		return;
	}

	kimage_segment_len = get_array_length("kimage.segment", NULL,
	    STRUCT_SIZE("kexec_segment"));

	if (!readmem(kexec_crash_image_p + MEMBER_OFFSET("kimage", "segment"),
	    KVADDR, buf, MEMBER_SIZE("kimage", "segment"),
	    "kexec backup region: kexec_crash_image->segment",
	    QUIET|RETURN_ON_ERROR))
		goto error;

	/* Scan the kimage segments for the one whose physical start holds
	 * an ELF magic -- that segment is the elfcorehdr. */
	elfcorehdr_p = 0;
	for (i = 0; i < kimage_segment_len; ++i) {
		char e_ident[EI_NIDENT];
		ulong mem;

		mem = ULONG(buf + i * STRUCT_SIZE("kexec_segment") +
		    MEMBER_OFFSET("kexec_segment", "mem"));
		if (!mem)
			continue;

		if (!readmem(mem, PHYSADDR, e_ident, SELFMAG,
		    "elfcorehdr: e_ident", QUIET|RETURN_ON_ERROR))
			goto error;

		if (strncmp(ELFMAG, e_ident, SELFMAG) == 0) {
			elfcorehdr_p = mem;
			break;
		}
	}
	if (!elfcorehdr_p) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "%s: elfcorehdr not found in segments of kexec_crash_image\n",
			    typename);
		goto error;
	}

	/* Pull the program header table geometry out of the ELF header,
	 * using the 32- or 64-bit layout as appropriate. */
	if (is_32_bit) {
		if (!readmem(elfcorehdr_p, PHYSADDR, buf,
		    STRUCT_SIZE("elf32_hdr"), "elfcorehdr",
		    QUIET|RETURN_ON_ERROR))
			goto error;
		e_phnum = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phnum"));
		e_phentsize = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phentsize"));
		e_phoff32 = ULONG(buf + MEMBER_OFFSET("elf32_hdr", "e_phoff"));
	} else {
		if (!readmem(elfcorehdr_p, PHYSADDR, buf,
		    STRUCT_SIZE("elf64_hdr"), "elfcorehdr",
		    QUIET|RETURN_ON_ERROR))
			goto error;
		e_phnum = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phnum"));
		e_phentsize = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phentsize"));
		e_phoff64 = ULONG(buf + MEMBER_OFFSET("elf64_hdr", "e_phoff"));
	}

	backup_src_start = backup_src_size = backup_offset = 0;
	for (i = 0; i < e_phnum; ++i) {
		uint32_t p_type;
		Elf32_Off p_offset32;
		Elf64_Off p_offset64;
		Elf32_Addr p_paddr32;
		Elf64_Addr p_paddr64;
		uint32_t p_memsz32;
		uint64_t p_memsz64;

		if (is_32_bit) {
			if (!readmem(elfcorehdr_p + e_phoff32 + i * e_phentsize,
			    PHYSADDR, buf, e_phentsize,
			    "elfcorehdr: program header",
			    QUIET|RETURN_ON_ERROR))
				goto error;
			p_type = UINT(buf+MEMBER_OFFSET("elf32_phdr","p_type"));
			p_offset32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_offset"));
			p_paddr32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_paddr"));
			p_memsz32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_memsz"));
		} else {
			if (!readmem(elfcorehdr_p + e_phoff64 + i * e_phentsize,
			    PHYSADDR, buf, e_phentsize,
			    "elfcorehdr: program header",
			    QUIET|RETURN_ON_ERROR))
				goto error;
			p_type = UINT(buf+MEMBER_OFFSET("elf64_phdr","p_type"));
			p_offset64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_offset"));
			p_paddr64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_paddr"));
			p_memsz64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_memsz"));
		}

		/*
		 * kexec marks backup region PT_LOAD by assigning
		 * backup region address in p_offset, and p_addr in
		 * p_offsets for other PT_LOAD entries.
		 */
		if (is_32_bit) {
			/* Backup region: a low-memory PT_LOAD whose file
			 * offset differs from its physical address. */
			if (p_type == PT_LOAD &&
			    p_paddr32 <= KEXEC_BACKUP_SRC_END &&
			    p_paddr32 != p_offset32) {
				backup_src_start = p_paddr32;
				backup_src_size = p_memsz32;
				backup_offset = p_offset32;
				if (CRASHDEBUG(1))
					error(INFO,
					    "%s: kexec backup region found: "
					    "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n",
					    typename, backup_src_start,
					    backup_src_size, backup_offset);
				break;
			}
		} else {
			if (p_type == PT_LOAD &&
			    p_paddr64 <= KEXEC_BACKUP_SRC_END &&
			    p_paddr64 != p_offset64) {
				backup_src_start = p_paddr64;
				backup_src_size = p_memsz64;
				backup_offset = p_offset64;
				if (CRASHDEBUG(1))
					error(INFO,
					    "%s: kexec backup region found: "
					    "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n",
					    typename, backup_src_start,
					    backup_src_size, backup_offset);
				break;
			}
		}
	}

	if (!backup_offset) {
		if (CRASHDEBUG(1))
			error(WARNING,
			    "%s: backup region not found in elfcorehdr\n",
			    typename);
		return;
	}

	/* Scan the backup region for any non-zero byte; only then is the
	 * read-redirection recorded in the dumpfile descriptor. */
	bufsize = BUFSIZE;
	for (total = 0; total < backup_src_size; total += bufsize) {
		char backup_buf[BUFSIZE];
		int j;

		if (backup_src_size - total < BUFSIZE)
			bufsize = backup_src_size - total;

		if (!readmem(backup_offset + total, PHYSADDR, backup_buf,
		    bufsize, "backup source", QUIET|RETURN_ON_ERROR))
			goto error;

		/*
		 * We're assuming the backup region is initialized
		 * with 0 filled if kdump has not run.
		 */
		for (j = 0; j < bufsize; ++j) {
			if (backup_buf[j]) {
				if (SADUMP_DUMPFILE()) {
					sd->flags |= SADUMP_KDUMP_BACKUP;
					sd->backup_src_start = backup_src_start;
					sd->backup_src_size = backup_src_size;
					sd->backup_offset = backup_offset;
				} else if (pc->flags2 & QEMU_MEM_DUMP_ELF) {
					vd->flags |= QEMU_MEM_DUMP_KDUMP_BACKUP;
					vd->backup_src_start = backup_src_start;
					vd->backup_src_size = backup_src_size;
					vd->backup_offset = backup_offset;
				}
				if (CRASHDEBUG(1))
					error(INFO,
					    "%s: backup region is used: %llx\n",
					    typename, backup_offset + total + j);
				return;
			}
		}
	}

	if (CRASHDEBUG(1))
		error(INFO, "%s: kexec backup region not used\n", typename);

	return;

error:
	error(WARNING, "failed to init kexec backup region\n");
}
crash-7.1.4/lkcd_dump_v8.h0000664000000000000000000004075312634305150014063 0ustar rootroot/* lkcd_dump_v8.h - core analysis suite
 *
 * Forward ported from lkcd_dump_v5.h by Corey Mineyard
 *
 * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * Kernel header file for Linux crash dumps.
 *
 * Created by: Matt Robinson (yakker@sgi.com)
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 *
 * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
 * Copyright 2001 Matt D. Robinson. All rights reserved.
* * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * referencing them in the dump_sgi_environment() helper routines. */ /* necessary header files */ #include /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress 
this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. 
*/ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) */ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* timeval depends on architecture, two long values */ struct { uint64_t tv_sec; uint64_t tv_usec; } dh_time; /* the time of the system crash */ /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ uint64_t dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; /* size of dump buffer -- only in v9 dumps so we don't declare it here */ /* uint64_t dh_dump_buffer_size; */ } __attribute__((packed)) dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. 
The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } __attribute__((packed)) dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. */ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; /* * * machine specific dump headers * */ /* * IA64 --------------------------------------------------------- */ #if defined(IA64) #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit 
each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. */ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
* */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */ uint64_t dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; uint64_t dha_stack[NR_CPUS]; uint64_t dha_stack_ptr[NR_CPUS]; /* load address of kernel */ uint64_t dha_kernel_addr; } __attribute__((packed)) dump_header_asm_t; struct dump_CPU_info_ia64 { struct pt_regs dha_smp_regs; uint64_t dha_smp_current_task; uint64_t dha_stack; uint64_t dha_stack_ptr; } __attribute__((packed)) dump_CPU_info_ia64_t; typedef struct dump_CPU_info_ia64 dump_CPU_info_t; /* * i386 --------------------------------------------------------- */ #elif defined(X86) #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* * Structure: __dump_header_asm * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint32_t dha_smp_current_task[NR_CPUS]; uint32_t dha_stack[NR_CPUS]; uint32_t dha_stack_ptr[NR_CPUS]; } __attribute__((packed)) dump_header_asm_t; /* * CPU specific part of dump_header_asm_t */ typedef struct dump_CPU_info_s { struct pt_regs dha_smp_regs; uint32_t dha_smp_current_task; uint32_t dha_stack; uint32_t dha_stack_ptr; } __attribute__ ((packed)) dump_CPU_info_t; /* * x86-64 --------------------------------------------------------- */ #elif defined(X86_64) /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */ struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; uint64_t dha_stack[NR_CPUS]; uint64_t dha_stack_ptr[NR_CPUS]; } __attribute__((packed)) dump_header_asm_t; /* * CPU specific part of dump_header_asm_t */ typedef struct dump_CPU_info_s { struct pt_regs dha_smp_regs; uint64_t dha_smp_current_task; uint64_t dha_stack; uint64_t dha_stack_ptr; } __attribute__ ((packed)) dump_CPU_info_t; #else #define HAVE_NO_DUMP_HEADER_ASM 1 #endif #endif /* _DUMP_H */ crash-7.1.4/va_server_v1.c0000775000000000000000000002235612634305150014103 0ustar rootroot/* va_server_v1.c - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 11/12/99, Dave Winchell, Preserve V1 interface. 
*/ #include #include #include #include #include #include #include #include #include "va_server.h" #include #include #include struct map_hdr_v1 *vas_map_base_v1 = (struct map_hdr_v1 *)0; /* base of tree */ #ifdef NOT_DEF #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(page_size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(page_size - 1))) #endif extern u_long vas_base_va; extern u_long vas_start_va; u_long vas_end_va; void find_data_v1(u_long va, u_long *buf, u_long *len, u_long *offset); void load_data_v1(struct map_hdr_v1 *hdr, u_long index, u_long *buf, u_long *len); struct map_hdr_v1 *find_header_v1(u_long va); u_long vas_find_start_v1(void); u_long vas_find_end_v1(void); int read_maps_v1(char *crash_file); int read_map_v1(int blk_pos); extern int Page_Size; extern FILE *vas_file_p; extern void *malloc(size_t); int va_server_init_v1(char *crash_file, u_long *start, u_long *end, u_long *stride) { if(read_maps_v1(crash_file)) return -1; vas_base_va = vas_start_va = vas_find_start_v1(); vas_end_va = vas_find_end_v1(); if(start) *start = vas_start_va; if(end) *end = vas_end_va; if(stride) *stride = vas_map_base_v1->va_per_entry; return 0; } int vas_lseek_v1(u_long position, int whence) { if(whence != SEEK_SET) return -1; if(position > (vas_end_va - vas_start_va)) { printf("position 0x%lx beyond dump range of 0x%lx\n", position, (vas_end_va - vas_start_va)); return -1; } vas_base_va = vas_start_va + position; return 0; } size_t vas_read_v1(void *buf_in, size_t count) { u_long len, offset, buf, va; u_long num, output, remaining; if(count > (vas_end_va - vas_base_va)) { printf("count 0x%lx greater than remaining dump of 0x%lx\n", (ulong)count, (vas_end_va - vas_base_va)); return -1; } va = vas_base_va; remaining = count; output = (u_long)buf_in; while(remaining) { find_data_v1(va, &buf, &len, &offset); num = (remaining > (len - offset)) ? 
(len - offset) : remaining; bcopy((const void *)(buf+offset), (void *)output, num); remaining -= num; va += num; output += num; } vas_base_va += count; return count; } size_t vas_write_v1(void *buf_in, size_t count) { u_long len, offset, buf, va; if(count != sizeof(u_long)) { printf("count %d not %d\n", (int)count, (int)sizeof(u_long)); return -1; } va = vas_base_va; find_data_v1(va, &buf, &len, &offset); *(u_long *)(buf+offset) = *(u_long *)buf_in; vas_base_va += count; return count; } void find_data_v1(u_long va, u_long *buf, u_long *len, u_long *offset) { struct map_hdr_v1 *hdr; u_long index, off; hdr = find_header_v1(va); index = (va - hdr->start_va) / hdr->va_per_entry; off = (va - hdr->start_va) % hdr->va_per_entry; load_data_v1(hdr, index, buf, len); if(offset) *offset = off; } void vas_free_data_v1(u_long va) { struct map_hdr_v1 *hdr; u_long index; hdr = find_header_v1(va); index = (va - hdr->start_va) / hdr->va_per_entry; if(hdr->map[index].exp_data) { free((void *)hdr->map[index].exp_data); hdr->map[index].exp_data = 0; } } void load_data_v1(struct map_hdr_v1 *hdr, u_long index, u_long *buf, u_long *len) { char *compr_buf; char *exp_buf; int ret, items; uLongf destLen; if(hdr->map[index].exp_data) goto out; ret = fseek(vas_file_p, (long)((hdr->blk_offset + hdr->map[index].start_blk) * hdr->blk_size), SEEK_SET); if(ret == -1) { printf("load_data: unable to fseek, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } compr_buf = (char *)malloc(2*hdr->va_per_entry); if(!compr_buf) { printf("load_data: bad ret from malloc, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } items = fread((void *)compr_buf, sizeof(char), hdr->map[index].num_blks * hdr->blk_size, vas_file_p); if(items != hdr->map[index].num_blks * hdr->blk_size) { printf("unable to read blocks from errno = %d\n", ferror(vas_file_p)); clean_exit(1); } hdr->map[index].exp_data = exp_buf = (char *)malloc(hdr->va_per_entry); if(!exp_buf) { printf("load_data: bad ret from malloc, errno = %d\n", 
ferror(vas_file_p)); clean_exit(1); } destLen = (uLongf)(2*hdr->va_per_entry); ret = uncompress((Bytef *)exp_buf, &destLen, (const Bytef *)compr_buf, (uLong)items); /* if(destLen != hdr->va_per_entry) { printf("uncompress error\n"); exit(1); } */ if(ret) { if(ret == Z_MEM_ERROR) printf("load_data, bad ret Z_MEM_ERROR from uncompress\n"); else if(ret == Z_BUF_ERROR) printf("load_data, bad ret Z_BUF_ERROR from uncompress\n"); else if(ret == Z_DATA_ERROR) printf("load_data, bad ret Z_DATA_ERROR from uncompress\n"); else printf("load_data, bad ret %d from uncompress\n", ret); clean_exit(1); } free((void *)compr_buf); out: if(buf) *buf = (u_long)hdr->map[index].exp_data; if(len) *len = hdr->va_per_entry; return; } struct map_hdr_v1 *find_header_v1(u_long va) { struct map_hdr_v1 *hdr; int found = 0; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if((va >= hdr->start_va) && (va < hdr->end_va)) { found = 1; break; } if(found) return hdr; else return (struct map_hdr_v1 *)0; } u_long vas_find_start_v1(void) { struct map_hdr_v1 *hdr; u_long start; start = vas_map_base_v1->start_va; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if(hdr->start_va < start) start = hdr->start_va; return start; } u_long vas_find_end_v1(void) { struct map_hdr_v1 *hdr; u_long end; end = vas_map_base_v1->end_va; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if(hdr->end_va > end) end = hdr->end_va; return end; } int read_maps_v1(char *crash_file) { int *cur_entry_p; int ret, items, blk_pos; cur_entry_p = (int *)malloc(Page_Size); if(!cur_entry_p) { printf("read_maps: bad ret from malloc, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } bzero((void *)cur_entry_p, Page_Size); vas_file_p = fopen(crash_file, "r"); if(vas_file_p == (FILE *)0) { printf("read_maps: bad ret from fopen for %s: %s\n", crash_file, strerror(errno)); return -1; } ret = fseek(vas_file_p, (long)0, SEEK_SET); if(ret == -1) { printf("read_maps: unable to fseek in %s, errno = %d\n", crash_file, ferror(vas_file_p)); return 
-1; } items = fread((void *)cur_entry_p, 1, Page_Size, vas_file_p); if(items != Page_Size) { printf("read_maps: unable to read header from %s, errno = %d\n", crash_file, ferror(vas_file_p)); return -1; } ret = -1; while ((blk_pos = *cur_entry_p++)) { if (read_map_v1(blk_pos)) return -1; ret = 0; } return ret; } int read_map_v1(int blk_pos) { struct crash_map_hdr_v1 *disk_hdr; int ret, items; struct map_hdr_v1 *hdr, *hdr1; extern int console(char *, ...); hdr = (struct map_hdr_v1 *)malloc(sizeof(struct map_hdr_v1)); if(!hdr) { printf("read_map: unable to malloc mem\n"); return -1; } bzero((void *)hdr, sizeof(struct map_hdr_v1)); disk_hdr = (struct crash_map_hdr_v1 *)malloc(Page_Size); ret = fseek(vas_file_p, (long)(blk_pos*Page_Size), SEEK_SET); if(ret == -1) { console("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(disk_hdr); return -1; } items = fread((void *)disk_hdr, 1, Page_Size, vas_file_p); if(items != Page_Size) { return -1; } if(disk_hdr->magic[0] != CRASH_MAGIC) { console("va_server: bad magic 0x%lx\n", disk_hdr->magic[0]); return -1; } ret = fseek(vas_file_p, (long)((blk_pos + disk_hdr->map_block) * disk_hdr->blk_size), SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); return -1; } hdr->map_entries = disk_hdr->map_entries; hdr->va_per_entry = disk_hdr->va_per_entry; hdr->blk_offset = blk_pos - CRASH_OFFSET_BLKS; hdr->blk_size = disk_hdr->blk_size; Page_Size = disk_hdr->blk_size; /* over-ride PAGE_SIZE */ hdr->map = (struct crash_map_entry_v1 *)malloc(hdr->map_entries * sizeof(struct crash_map_entry_v1)); items = fread((void *)hdr->map, sizeof(struct crash_map_entry_v1), hdr->map_entries, vas_file_p); if(items != hdr->map_entries) { printf("unable to read map entries, err = %d\n", errno); return -1; } hdr->start_va = hdr->map[0].start_va; hdr->end_va = hdr->start_va + hdr->map_entries * hdr->va_per_entry; if(!vas_map_base_v1) { vas_map_base_v1 = hdr; hdr->next = (struct map_hdr_v1 *)0; } 
else { hdr1 = vas_map_base_v1; while(hdr1->next) hdr1 = hdr1->next; hdr1->next = hdr; hdr->next = (struct map_hdr_v1 *)0; } free((void *)disk_hdr); return 0; } crash-7.1.4/remote.c0000775000000000000000000032005712634305150012773 0ustar rootroot/* remote.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2009, 2011 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2009, 2011 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include #include #include #include #include #define FAILMSG "FAIL " #define DONEMSG "DONE " #define DATAMSG "DATA " #define DATA_HDRSIZE (13) /* strlen("XXXX ") + strlen("0131072") + NULL */ #define MAXRECVBUFSIZE (131072) #define READBUFSIZE (MAXRECVBUFSIZE+DATA_HDRSIZE) #ifdef DAEMON /* * The remote daemon. 
*/ static int daemon_init(void); static ulong daemon_htol(char *); static int daemon_is_elf_file(char *); static int daemon_mount_point(char *); static int daemon_find_booted_kernel(char *); static char **daemon_build_searchdirs(int); static int daemon_is_directory(char *); static int daemon_file_readable(char *); static int daemon_parse_line(char *, char **); static char *daemon_clean_line(char *); int console(char *, ...); static void daemon_socket_options(int); static char *no_debugging_symbols_found(char *); static ulong daemon_filesize(int); static int daemon_find_module(char *, char *, char *); static int daemon_search_directory_tree(char *, char *, char *); static int daemon_file_exists(char *, struct stat *); static int daemon_checksum(char *, long *); static void daemon_send(void *, int); static int daemon_proc_version(char *); static void handle_connection(int); struct remote_context { int sock; int remdebug; char *remdebugfile; } remote_context = { 0, 0, "/dev/null" }; struct remote_context *rc = &remote_context; int main(int argc, char **argv) { int c, sockfd, newsockfd, clilen; struct sockaddr_in serv_addr, cli_addr; struct hostent *hp; ushort tcp_port; char hostname[MAXHOSTNAMELEN]; tcp_port = 0; optind = 0; while ((c = getopt(argc, argv, "vd:")) > 0) { switch (c) { case 'v': printf("%s %s\n", basename(argv[0]), /* BASELEVEL_REVISION */ "(deprecated)"); exit(0); case 'd': rc->remdebug++; rc->remdebugfile = optarg; break; } } console("\n", getpid()); while (argv[optind]) { if (!tcp_port) tcp_port = (ushort)atoi(argv[optind]); optind++; } console("port: %d\n", tcp_port); if (gethostname(hostname, MAXHOSTNAMELEN) < 0) { console("gethostname failed: %s\n", strerror(errno)); perror("gethostname"); exit(1); } console("hostname: %s\n", hostname); if ((hp = gethostbyname(hostname)) == NULL) { console("gethostbyname failed: %s\n", hstrerror(h_errno)); perror("gethostbyname"); exit(1); } console("attempting daemon_init...\n"); if (!daemon_init()) exit(1); 
console("\n", getpid()); if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1); BZERO((char *)&serv_addr, sizeof(serv_addr)); serv_addr.sin_family = AF_INET; BCOPY(hp->h_addr, (char *)&serv_addr.sin_addr, hp->h_length); serv_addr.sin_port = htons(tcp_port); daemon_socket_options(sockfd); if (bind(sockfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0){ console("%d: bind failed: %s\n", getpid(), strerror(errno)); exit(1); } if (listen(sockfd, 5) < 0) { console("%d: listen failed: %s\n", getpid(), strerror(errno)); exit(1); } for (;;) { clilen = sizeof(cli_addr); if ((newsockfd = accept(sockfd, (struct sockaddr *)&cli_addr, &clilen)) < 0) { console("%d: accept failed: %s\n", getpid(), strerror(errno)); exit(1); } switch (fork()) { case -1: exit(1); case 0: close(sockfd); handle_connection(newsockfd); exit(0); default: close(newsockfd); break; } close(newsockfd); } } /* * This probably doesn't do much, but it might reduce the acknowledge * negotiations somewhat. (?) */ static void daemon_socket_options(int sockfd) { int nodelay; if (setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&nodelay, sizeof(nodelay)) < 0) console("TCP_NODELAY setsockopt error\n"); } /* * This is the child daemon that handles the incoming requests. 
 */

#define MAX_REMOTE_FDS (10)

/*
 * Per-connection service loop, run in the forked child: read one request
 * string from the socket, dispatch on its leading keyword, and reply via
 * daemon_send().  Loops until the peer closes the socket or sends "EXIT".
 *
 * NOTE(review): several reply literals below are bare " " or "" strings;
 * they appear to have lost "<FAIL>"-style markers during an extraction /
 * encoding pass -- verify against pristine sources before changing them.
 */
static void
handle_connection(int sock)
{
    int i;
    char recvbuf[BUFSIZE];
    char savebuf[BUFSIZE];      /* untouched copy; strtok mutates recvbuf */
    char sendbuf[BUFSIZE];
    char buf1[BUFSIZE];
    char readbuf[READBUFSIZE+1];
    char *file;
    FILE *tmp, *pipe;
    char *p1, *p2, *p3;
    size_t cnt;
    int fds[MAX_REMOTE_FDS];    /* file descriptors lent to the client */
    int mfd;
    ulong addr, total, reqsize, bufsize;
    fd_set rfds;
    int len, first, retval, done;
    struct stat sbuf;

    rc->sock = sock;
    console("< new connection >\n");

    for (i = 0; i < MAX_REMOTE_FDS; i++)
        fds[i] = -1;

    while (TRUE) {
        FD_ZERO(&rfds);
        FD_SET(sock, &rfds);
        /* block until the peer sends something */
        retval = select(sock+1, &rfds, NULL, NULL, NULL);

        BZERO(sendbuf, BUFSIZE);
        BZERO(recvbuf, BUFSIZE);

        switch (read(sock, recvbuf, BUFSIZE-1))
        {
        case -1:
            console("[read returned -1]\n");
            continue;
        case 0:
            /* EOF: peer went away */
            console("[read returned 0]\n");
            return;
        default:
            console("[%s]: ", recvbuf);
            break;
        }

        if (STRNEQ(recvbuf, "OPEN ")) {
            /* "OPEN <file>": open on the client's behalf, reply with
             * descriptor, open mode and file size. */
            strcpy(sendbuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* OPEN */
            file = strtok(NULL, " ");    /* filename */

            for (i = 0; i < MAX_REMOTE_FDS; i++) {
                if (fds[i] == -1)
                    break;
            }

            if (i < MAX_REMOTE_FDS) {
                /* try read-write first, fall back to read-only */
                if ((fds[i] = open(file, O_RDWR)) < 0) {
                    if ((fds[i] = open(file, O_RDONLY)) < 0)
                        strcat(sendbuf, " ");
                    else {
                        sprintf(buf1, " %d O_RDONLY %ld",
                            fds[i], daemon_filesize(fds[i]));
                        strcat(sendbuf, buf1);
                    }
                } else {
                    sprintf(buf1, " %d O_RDWR %ld",
                        fds[i], daemon_filesize(fds[i]));
                    strcat(sendbuf, buf1);
                }
            } else
                strcat(sendbuf, " ");

            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "READ_LIVE ")) {
            /* read live memory through a previously OPENed fd */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_LIVE */
            p1 = strtok(NULL, " ");      /* filename id */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            addr = daemon_htol(p2);
            len = atoi(p3);
            mfd = atoi(p1);
            errno = 0;
            BZERO(readbuf, READBUFSIZE);
            if (lseek(mfd, addr, SEEK_SET) == -1)
                len = 0;
            else if (read(mfd, &readbuf[DATA_HDRSIZE], len) != len)
                len = 0;
            if (!len) {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
            } else {
                sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len);
                /* NOTE(review): "%ld" with int len -- works on LP32,
                 * mismatched on LP64; confirm against the other READ_*
                 * handlers which cast to (ulong) */
                console("(%ld)\n", len);
            }
            daemon_send(readbuf, len+DATA_HDRSIZE);
            continue;
        } else if (STRNEQ(recvbuf, "READ_NETDUMP ")) {
            /* read from a netdump dumpfile */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_NETDUMP */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            addr = daemon_htol(p2);
            len = atoi(p3);
            BZERO(readbuf, READBUFSIZE);
            errno = 0;
            if ((len = read_netdump(UNUSED, &readbuf[DATA_HDRSIZE],
                len, UNUSED, addr)) < 0)
                len = 0;
            if (len) {
                sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len);
                console("(%ld)\n", (ulong)len);
            } else {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
            }
            daemon_send(readbuf, len+DATA_HDRSIZE);
            continue;
        } else if (STRNEQ(recvbuf, "READ_MCLXCD ")) {
            /* read from a Mission Critical compressed dumpfile */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_MCLXCD */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            addr = daemon_htol(p2);
            len = atoi(p3);
            errno = 0;
            BZERO(readbuf, READBUFSIZE);
            /* vas_lseek() returns nonzero on failure */
            if (vas_lseek(addr, SEEK_SET))
                len = 0;
            else if (vas_read((void *) &readbuf[DATA_HDRSIZE], len) != len)
                len = 0;
            if (len) {
                sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)len);
                console("(%ld)\n", (ulong)len);
            } else {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
            }
            daemon_send(readbuf, len+DATA_HDRSIZE);
            continue;
        } else if (STRNEQ(recvbuf, "CLOSE ")) {
            /* release a descriptor previously handed out by OPEN */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* SIZE */
            p1 = strtok(NULL, " ");      /* filename id */
            mfd = atoi(p1);
            for (i = retval = 0; i < MAX_REMOTE_FDS; i++) {
                if (fds[i] == mfd) {
                    close(mfd);
                    fds[i] = -1;
                    retval = TRUE;
                    break;
                }
            }
            sprintf(sendbuf, "%s%s", savebuf, retval ? " OK" : " ");
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "READ ")) {
            /* raw read: no DATA_HDRSIZE header on the reply */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ */
            p1 = strtok(NULL, " ");      /* filename id */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            addr = daemon_htol(p2);
            len = atoi(p3);
            mfd = atoi(p1);
            BZERO(readbuf, READBUFSIZE);
            if (lseek(mfd, addr, SEEK_SET) == -1)
                len = 0;
            else if (read(mfd, readbuf, len) != len)
                len = 0;
            if (!len) {
                /* failure: echo the request back as the reply */
                sprintf(readbuf, "%s ", savebuf);
                len = strlen(readbuf);
                console("[%s]\n", readbuf);
            } else
                console("(%ld)\n", len);
            daemon_send(readbuf, len);
            continue;
        } else if (STRNEQ(recvbuf, "MACHINE_PID")) {
            /* report machine type plus daemon pid (used by the client
             * as the live-system initial context) */
            sprintf(sendbuf, "%s %s %d", recvbuf, MACHINE_TYPE, getpid());
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "TYPE ")) {
            /* classify a file: kernel namelist vs. dumpfile flavors */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* TYPE */
            file = strtok(NULL, " ");    /* filename */
            if (stat(file, &sbuf) < 0)
                sprintf(sendbuf, "%s ", savebuf);
            else if (daemon_is_elf_file(file))
                sprintf(sendbuf, "%s ELF", savebuf);
            else if (STREQ(file, "/dev/mem"))
                sprintf(sendbuf, "%s DEVMEM", savebuf);
            else if (is_netdump(file, NETDUMP_REMOTE))
                sprintf(sendbuf, "%s NETDUMP", savebuf);
            else if (is_mclx_compressed_dump(file))
                sprintf(sendbuf, "%s MCLXCD", savebuf);
            else if (is_lkcd_compressed_dump(file))
                sprintf(sendbuf, "%s LKCD", savebuf);
            else if (is_s390_dump(file))
                sprintf(sendbuf, "%s S390D", savebuf);
            else
                sprintf(sendbuf, "%s UNSUPPORTED", savebuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "LINUX_VERSION ")) {
            /* scrape the "Linux version" string out of a kernel image */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* LINUX_VERSION */
            file = strtok(NULL, " ");    /* filename */
            sprintf(readbuf,
                "/usr/bin/strings %s | grep 'Linux version'", file);
            if ((pipe = popen(readbuf, "r"))) {
                BZERO(readbuf, BUFSIZE);
                if (fread(readbuf, sizeof(char), BUFSIZE-1, pipe) > 0)
                    strcpy(sendbuf, readbuf);
                else
                    sprintf(sendbuf, "%s ", savebuf);
                pclose(pipe);
            } else
                sprintf(sendbuf, "%s ", savebuf);
            console("[%s] (%d)\n", sendbuf, strlen(sendbuf));
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "READ_GZIP ")) {
            /* stream a gzip'd copy of a file in bufsize-chunk messages,
             * each prefixed with a DATA/DONE/FAIL header */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_GZIP */
            p1 = strtok(NULL, " ");      /* bufsize */
            bufsize = atol(p1);
            file = strtok(NULL, " ");    /* filename */
            errno = 0;
            reqsize = bufsize - DATA_HDRSIZE;
            sprintf(readbuf, "/usr/bin/gzip -c %s", file);
            if ((pipe = popen(readbuf, "r")) == NULL) {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
                daemon_send(readbuf, DATA_HDRSIZE);
                continue;
            }
            errno = cnt = done = total = first = 0;
            while (!done) {
                BZERO(readbuf, READBUFSIZE);
                cnt = fread(&readbuf[DATA_HDRSIZE],
                    sizeof(char), reqsize, pipe);
                total += cnt;
                if (feof(pipe)) {
                    sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt);
                    done = TRUE;
                } else if (ferror(pipe)) {
                    sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                    done = TRUE;
                } else
                    sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt);
                console("%s[%s]\n", !first++ ? "\n" : "", readbuf);
                daemon_send(readbuf, bufsize);
            }
            console("GZIP total: %ld\n", total);
            pclose(pipe);
            continue;
        } else if (STRNEQ(recvbuf, "PROC_VERSION")) {
            /* send back the contents of /proc/version */
            BZERO(readbuf, READBUFSIZE);
            if (!daemon_proc_version(readbuf))
                sprintf(readbuf, "%s ", recvbuf);
            console("[%s]\n", readbuf);
            daemon_send(readbuf, strlen(readbuf));
            continue;
        } else if (STRNEQ(recvbuf, "DEBUGGING_SYMBOLS ")) {
            /* ask gdb whether the namelist has debug data */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* DEBUGGING */
            p2 = strtok(NULL, " ");      /* filename */
            sprintf(sendbuf, "%s %s", savebuf,
                no_debugging_symbols_found(p2));
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "PAGESIZE ")) {
            /* page size of the requested memory source */
            if (strstr(recvbuf, "LIVE"))
                sprintf(sendbuf, "%s %d", recvbuf,
                    (uint)getpagesize());
            else if (strstr(recvbuf, "NETDUMP"))
                sprintf(sendbuf, "%s %d", recvbuf,
                    (uint)netdump_page_size());
            else if (strstr(recvbuf, "MCLXCD"))
                sprintf(sendbuf, "%s %d", recvbuf,
                    (uint)mclx_page_size());
            else if (strstr(recvbuf, "LKCD"))
                sprintf(sendbuf, "%s %d", recvbuf,
                    (uint)lkcd_page_size());
            else if (strstr(recvbuf, "S390D"))
                sprintf(sendbuf, "%s %d", recvbuf,
                    s390_page_size());
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "FIND_BOOTED_KERNEL")) {
            /* locate a kernel image matching the running kernel */
            BZERO(readbuf, READBUFSIZE);
            if (daemon_find_booted_kernel(readbuf))
                sprintf(sendbuf, "%s %s", recvbuf, readbuf);
            else
                sprintf(sendbuf, "%s ", recvbuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "FIND_MODULE ")) {
            /* locate a module object and append its checksum */
            strcpy(savebuf, recvbuf);
            strtok(recvbuf, " ");        /* FIND_MODULE */
            p1 = strtok(NULL, " ");      /* release */
            p2 = strtok(NULL, " ");      /* module */
            if (daemon_find_module(p1, p2, buf1)) {
                if (daemon_checksum(buf1, &total))
                    sprintf(sendbuf, "%s %s %lx",
                        savebuf, buf1, total);
                else
                    /* found but unreadable: sentinel checksum */
                    sprintf(sendbuf, "%s %s %lx",
                        savebuf, buf1, (ulong)0xdeadbeef);
            } else
                sprintf(sendbuf, "%s ", savebuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "SUM ")) {
            /* checksum an arbitrary file */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* SUM */
            p2 = strtok(NULL, " ");      /* filename */
            if (daemon_checksum(p2, &total))
                sprintf(sendbuf, "%s %lx", savebuf, total);
            else
                sprintf(sendbuf, "%s ", savebuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "MEMORY ")) {
            /* free/used statistics for the given dump format */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* MEMORY */
            p2 = strtok(NULL, " ");      /* USED or FREE */
            p3 = strtok(NULL, " ");      /* MCLXCD, LKCD, etc. */
            if (STREQ(p2, "FREE")) {
                if (STREQ(p3, "NETDUMP"))
                    retval = netdump_free_memory();
                else if (STREQ(p3, "MCLXCD"))
                    retval = vas_free_memory(NULL);
                else if (STREQ(p3, "LKCD"))
                    retval = lkcd_free_memory();
                else if (STREQ(p3, "S390D"))
                    retval = s390_free_memory();
            }
            if (STREQ(p2, "USED")) {
                if (STREQ(p3, "NETDUMP"))
                    retval = netdump_memory_used();
                else if (STREQ(p3, "MCLXCD"))
                    retval = vas_memory_used();
                else if (STREQ(p3, "LKCD"))
                    retval = lkcd_memory_used();
                else if (STREQ(p3, "S390D"))
                    retval = s390_memory_used();
            }
            sprintf(sendbuf, "%s %d", savebuf, retval);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "MEMORY_DUMP")) {
            /* render a dump-format report into a tmpfile, then stream
             * it back in bufsize chunks */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* MEMORY_DUMP */
            p1 = strtok(NULL, " ");      /* bufsize */
            p2 = strtok(NULL, " ");      /* MCLXCD, LKCD, etc. */
            bufsize = atol(p1);
            reqsize = bufsize - DATA_HDRSIZE;
            errno = 0;
            if ((tmp = tmpfile()) == NULL) {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
                daemon_send(readbuf, DATA_HDRSIZE);
                continue;
            }
            if (STREQ(p2, "NETDUMP"))
                retval = netdump_memory_dump(tmp);
            else if (STREQ(p2, "MCLXCD"))
                vas_memory_dump(tmp);
            else if (STREQ(p2, "LKCD"))
                lkcd_memory_dump(tmp);
            else if (STREQ(p2, "LKCD_VERBOSE")) {
                set_lkcd_fp(tmp);
                dump_lkcd_environment(0);
                set_lkcd_fp(NULL);
            } else if (STREQ(p2, "S390D"))
                s390_memory_dump(tmp);
            rewind(tmp);
            errno = cnt = done = total = first = 0;
            while (!done) {
                BZERO(readbuf, READBUFSIZE);
                cnt = fread(&readbuf[DATA_HDRSIZE],
                    sizeof(char), reqsize, tmp);
                total += cnt;
                if (feof(tmp)) {
                    sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt);
                    done = TRUE;
                } else if (ferror(tmp)) {
                    sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                    done = TRUE;
                } else
                    sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt);
                console("%s[%s]\n", !first++ ? "\n" : "", readbuf);
                daemon_send(readbuf, bufsize);
            }
            console("MEMORY_DUMP total: %ld\n", total);
            fclose(tmp);
            continue;
        } else if (STRNEQ(recvbuf, "NETDUMP_INIT ")) {
            /* re-initialize the OPENed fd as a netdump source */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* NETDUMP_INIT */
            p2 = strtok(NULL, " ");      /* fd */
            p3 = strtok(NULL, " ");      /* dumpfile */
            mfd = atoi(p2);
            for (i = 0; i < MAX_REMOTE_FDS; i++) {
                if (fds[i] == mfd) {
                    close(mfd);
                    fds[i] = -1;
                    break;
                }
            }
            sprintf(sendbuf, "%s %s", savebuf,
                netdump_init(p3, NULL) ? "OK" : "");
            if ((addr = get_netdump_panic_task())) {
                sprintf(readbuf, "\npanic_task: %lx\n", addr);
                strcat(sendbuf, readbuf);
            }
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "LKCD_DUMP_INIT ")) {
            /* initialize an LKCD dumpfile; append panic task/message */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* LKCD_DUMP_INIT */
            p2 = strtok(NULL, " ");      /* fd */
            p3 = strtok(NULL, " ");      /* dumpfile */
            sprintf(sendbuf, "%s %s", savebuf,
                lkcd_dump_init(NULL, atoi(p2), p3) ? "OK" : "");
            if ((addr = get_lkcd_panic_task())) {
                sprintf(readbuf, "\npanic_task: %lx\n", addr);
                strcat(sendbuf, readbuf);
            }
            readbuf[0] = NULLCHAR;
            get_lkcd_panicmsg(readbuf);
            if (strlen(readbuf)) {
                strcat(sendbuf, "panicmsg: ");
                strcat(sendbuf, readbuf);
            }
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "READ_LKCD ")) {
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_LKCD */
            p1 = strtok(NULL, " ");      /* filename id */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            mfd = atoi(p1);
            addr = daemon_htol(p2);
            len = atoi(p3);
            BZERO(readbuf, READBUFSIZE);
            errno = 0;
            /* lkcd_lseek() returns zero on failure */
            if (!lkcd_lseek(addr))
                len = 0;
            else if (lkcd_read((void *) &readbuf[DATA_HDRSIZE], len)
                != len)
                len = 0;
            if (len) {
                sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len);
                console("(%ld)\n", (ulong)len);
            } else {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
            }
            daemon_send(readbuf, len+DATA_HDRSIZE);
            continue;
        } else if (STRNEQ(recvbuf, "S390_DUMP_INIT ")) {
            /* initialize an s390 dumpfile; append panic task/message */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* S390_DUMP_INIT */
            p2 = strtok(NULL, " ");      /* fd */
            p3 = strtok(NULL, " ");      /* filename */
            mfd = atoi(p2);
            for (i = 0; i < MAX_REMOTE_FDS; i++) {
                if (fds[i] == mfd) {
                    close(mfd);
                    fds[i] = -1;
                    break;
                }
            }
            sprintf(sendbuf, "%s %s", savebuf,
                s390_dump_init(p3) ? "OK" : "");
            if ((addr = get_s390_panic_task())) {
                sprintf(readbuf, "\npanic_task: %lx\n", addr);
                strcat(sendbuf, readbuf);
            }
            readbuf[0] = NULLCHAR;
            get_s390_panicmsg(readbuf);
            if (strlen(readbuf)) {
                strcat(sendbuf, "panicmsg: ");
                strcat(sendbuf, readbuf);
            }
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "S390X_DUMP_INIT ")) {
            /* s390x variant of the above */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* S390X_DUMP_INIT */
            p2 = strtok(NULL, " ");      /* fd */
            p3 = strtok(NULL, " ");      /* filename */
            mfd = atoi(p2);
            for (i = 0; i < MAX_REMOTE_FDS; i++) {
                if (fds[i] == mfd) {
                    close(mfd);
                    fds[i] = -1;
                    break;
                }
            }
            sprintf(sendbuf, "%s %s", savebuf,
                s390x_dump_init(p3) ? "OK" : "");
            if ((addr = get_s390x_panic_task())) {
                sprintf(readbuf, "\npanic_task: %lx\n", addr);
                strcat(sendbuf, readbuf);
            }
            readbuf[0] = NULLCHAR;
            get_s390x_panicmsg(readbuf);
            if (strlen(readbuf)) {
                strcat(sendbuf, "panicmsg: ");
                strcat(sendbuf, readbuf);
            }
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            continue;
        } else if (STRNEQ(recvbuf, "READ_S390D ")) {
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* READ_S390D */
            p1 = strtok(NULL, " ");      /* filename id */
            p2 = strtok(NULL, " ");      /* address */
            p3 = strtok(NULL, " ");      /* length */
            mfd = atoi(p1);
            addr = daemon_htol(p2);
            len = atoi(p3);
            BZERO(readbuf, READBUFSIZE);
            errno = 0;
            if ((len = read_s390_dumpfile(UNUSED,
                &readbuf[DATA_HDRSIZE], len, UNUSED, addr)) < 0)
                len = 0;
            if (len) {
                sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len);
                console("(%ld)\n", (ulong)len);
            } else {
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
            }
            daemon_send(readbuf, len+DATA_HDRSIZE);
            continue;
        } else if (STRNEQ(recvbuf, "EXECUTE ")) {
            /* run a command and stream its stdout back in chunks.
             * NOTE(review): "echo  | %s" has a doubled space -- the
             * piped token appears stripped; verify against pristine
             * sources. */
            strcpy(savebuf, recvbuf);
            p1 = strtok(recvbuf, " ");   /* EXECUTE */
            p1 = strtok(NULL, " ");      /* bufsize */
            p2 = strtok(NULL, " ");      /* MCLXCD or LKCD */
            p3 = strstr(savebuf, p2);    /* full command tail */
            bufsize = atol(p1);
            reqsize = bufsize - DATA_HDRSIZE;
            sprintf(readbuf, "echo  | %s", p3);
            if ((pipe = popen(readbuf, "r")) == NULL) {
                BZERO(readbuf, READBUFSIZE);
                sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                console("[%s]\n", readbuf);
                daemon_send(readbuf, bufsize);
                continue;
            }
            errno = cnt = done = total = first = 0;
            while (!done) {
                BZERO(readbuf, READBUFSIZE);
                cnt = fread(&readbuf[DATA_HDRSIZE],
                    sizeof(char), reqsize, pipe);
                total += cnt;
                if (feof(pipe)) {
                    sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt);
                    done = TRUE;
                } else if (ferror(pipe)) {
                    sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno);
                    done = TRUE;
                } else
                    sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt);
                console("%s[%s]\n", !first++ ? "\n" : "", readbuf);
                daemon_send(readbuf, bufsize);
            }
            console("EXECUTE total: %ld\n", total);
            pclose(pipe);
            continue;
        } else if (STRNEQ(recvbuf, "EXIT")) {
            /* acknowledge, then terminate this child */
            sprintf(sendbuf, "%s OK", recvbuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
            return;
        } else {
            /* unrecognized command: echo it back */
            sprintf(sendbuf, "%s ", recvbuf);
            console("[%s]\n", sendbuf);
            daemon_send(sendbuf, strlen(sendbuf));
        }
    }
}

/*
 * Common error-checking send routine.
 */
#define MINSENDSIZE (1448)

static void
daemon_send(void *buffer, int len)
{
    int remaining, count, ret;
    char *bufptr;

    remaining = len;
    bufptr = buffer;

    /* send at most MINSENDSIZE bytes per call; retry (after a pause)
     * on transient buffer exhaustion, die on any other send error */
    while (remaining) {
        count = MIN(MINSENDSIZE, remaining);
        switch (ret = send(rc->sock, bufptr, count, 0))
        {
        case -1:
            switch (errno)
            {
            case ENOBUFS:
            case ENOMEM:
                sleep(1);
                continue;
            default:
                exit(1);
            }
            break;
        default:
            remaining -= ret;
            bufptr += ret;
            break;
        }
    }
    console("daemon_send: sent %d\n", len);
}

/*
 * debug print if the -d command line option was used.
 */
int
console(char *fmt, ...)
{
    char output[BUFSIZE*2];
    va_list ap;
    int retval;
    FILE *fp;

    /* no-op unless remote debugging was enabled */
    if (!rc->remdebug || !fmt || !strlen(fmt))
        return 0;

    va_start(ap, fmt);
    (void)vsnprintf(output, BUFSIZE*2, fmt, ap);
    va_end(ap);

    /* open/append/close each time so output survives a crash */
    if ((fp = fopen(rc->remdebugfile, "a")) == NULL)
        return 0;

    retval = fprintf(fp, "%s", output);
    fclose(fp);

    return retval;
}

/*
 * Fill in the file size of a freshly opened file.
 */
ulong
daemon_filesize(int fd)
{
    struct stat sbuf;

    /* best-effort: any fstat() failure is reported as size 0 */
    if (fstat(fd, &sbuf) == 0)
        return(sbuf.st_size);
    else
        return 0;
}

/*
 * Check for gdb output stating "(no debugging symbols found)".
 */
char *
no_debugging_symbols_found(char *file)
{
    FILE *pipe;
    char buf[BUFSIZE];

    /* feed 'q' so gdb exits immediately after loading the file */
    sprintf(buf, "echo 'q' | /usr/bin/gdb %s", file);
    if ((pipe = popen(buf, "r")) == NULL)
        return "NO_GDB";

    while (fgets(buf, BUFSIZE, pipe)) {
        if (strstr(buf, "(no debugging symbols found)")) {
            pclose(pipe);
            return "NO_DEBUG";
        }
    }
    pclose(pipe);

    return "DEBUG_OK";
}

/*
 * Read /proc/version into a buffer.
 */
static int
daemon_proc_version(char *buf)
{
    FILE *pipe;
    struct stat sbuf;

    if (stat("/proc/version", &sbuf) == -1)
        return FALSE;

    if ((pipe = popen("/bin/cat /proc/version", "r")) == NULL)
        return FALSE;

    /* NOTE(review): buf is not explicitly NUL-terminated here; callers
     * appear to pass a pre-zeroed buffer of at least BUFSIZE bytes --
     * confirm for any new call site */
    if (fread(buf, sizeof(char), BUFSIZE-1, pipe) <= 0) {
        pclose(pipe);
        return FALSE;
    }
    pclose(pipe);

    return TRUE;
}

/*
 * c/o W. Richard Stevens...
 */
#define OPEN_MAX_GUESS (256)

static int
daemon_init(void)
{
    int i;
    pid_t pid;
    int open_max;

    /* classic daemonization: fork, parent exits, child leads a
     * new session with no controlling terminal */
    if ((pid = fork()) < 0)
        return FALSE;
    else if (pid != 0)
        exit(0);

    setsid();
    chdir("/");
    umask(0);

    /* close every inherited descriptor */
    if ((open_max = sysconf(_SC_OPEN_MAX)) < 0)
        open_max = OPEN_MAX_GUESS;
    for (i = 0; i < open_max; i++)
        close(i);

    /* reap children automatically (SIGCLD is the SysV name) */
    signal(SIGCLD, SIG_IGN);

    unsetenv("DISPLAY");

    return TRUE;
}

/*
 * Determine whether a file is in ELF format by checking the magic number
 * in the first EI_NIDENT characters of the file.  If it's there, further
 * qualify it by doing a "file" operation on it.
 */
static int
daemon_is_elf_file(char *s)
{
    int fd, is_elf;
    char magic[EI_NIDENT];
    char buf[BUFSIZE];
    FILE *pipe;

    if ((fd = open(s, O_RDONLY)) < 0)
        return FALSE;

    if (read(fd, magic, EI_NIDENT) != EI_NIDENT) {
        close(fd);
        return FALSE;
    }
    close(fd);

    /* EI_CLASS is the first byte past the 4-byte "\177ELF" magic, so
     * terminating there lets it compare as a string against ELFMAG */
    magic[EI_CLASS] = NULLCHAR;
    if (!STREQ(magic, ELFMAG))
        return FALSE;

    /* secondary qualification via /usr/bin/file */
    sprintf(buf, "/usr/bin/file -L %s", s);
    if ((pipe = popen(buf, "r")) == NULL) {
        console("/usr/bin/strings popen failed\n");
        /* can't double-check: trust the magic number */
        return TRUE;
    }

    is_elf = FALSE;
    while (fgets(buf, BUFSIZE-1, pipe)) {
        if (strstr(buf, " ELF ") && strstr(buf, "executable")) {
            is_elf = TRUE;
            break;
        }
    }
    pclose(pipe);

    return is_elf;
}

/*
 * Translate ASCII hex addresses.
 */
static ulong
daemon_htol(char *s)
{
    long i, j;
    ulong n;

    /* oversized address string: treat as a protocol violation */
    if (strlen(s) > MAX_HEXADDR_STRLEN)
        exit(1);

    for (n = i = 0; s[i] != 0; i++) {
        switch (s[i])
        {
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
            j = (s[i] - 'a') + 10;
            break;
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
            j = (s[i] - 'A') + 10;
            break;
        case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9': case '0':
            j = s[i] - '0';
            break;
        case 'x': case 'X':
            /* skip "0x" prefix characters wherever they occur */
            continue;
        default:
            /* invalid hex digit: give up */
            exit(0);
        }
        n = (16 * n) + j;
    }

    return(n);
}

/*
 * Adapted from filesys.c, search the default directories for a kernel
 * that matches /proc/version.  daemon_build_searchdirs() builds an
 * array of directory names.
*/ #define CREATE 1 #define DESTROY 0 #define DEFAULT_SEARCHDIRS 4 static int daemon_find_booted_kernel(char *namelist) { char kernel[BUFSIZE]; char command[BUFSIZE]; char buffer[BUFSIZE]; char proc_version[BUFSIZE]; char *version; char **searchdirs; int i; DIR *dirp; struct dirent *dp; FILE *pipe; int found; struct stat sbuf; console("\n"); if (stat("/proc/version", &sbuf) < 0) { console("/proc/version not found\n"); return FALSE; } if (!daemon_proc_version(proc_version)) { console("cannot read /proc/version\n"); return FALSE; } version = proc_version; searchdirs = daemon_build_searchdirs(CREATE); for (i = 0, found = FALSE; !found && searchdirs[i]; i++) { dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { sprintf(kernel, "%s%s", searchdirs[i], dp->d_name); if (daemon_mount_point(kernel) || !daemon_file_readable(kernel) || !daemon_is_elf_file(kernel)) continue; sprintf(command, "/usr/bin/strings %s", kernel); if ((pipe = popen(command, "r")) == NULL) { console("/usr/bin/strings popen failed\n"); continue; } while (fgets(buffer, BUFSIZE-1, pipe)) { if (STREQ(buffer, version)) { found = TRUE; break; } } pclose(pipe); if (found) break; } closedir(dirp); } daemon_mount_point(DESTROY); daemon_build_searchdirs(DESTROY); if (found) { console("booted kernel: %s\n", kernel); strcpy(namelist, kernel); return TRUE; } console("cannot find booted kernel\n"); return FALSE; } static char ** daemon_build_searchdirs(int create) { int i; int cnt; DIR *dirp; struct dirent *dp; char dirbuf[BUFSIZE]; static char **searchdirs = { 0 }; static char *default_searchdirs[DEFAULT_SEARCHDIRS+1] = { "/usr/src/linux/", "/boot/", "/boot/efi/", "/", NULL }; if (!create) { if (searchdirs) { for (i = DEFAULT_SEARCHDIRS; searchdirs[i]; i++) free(searchdirs[i]); free(searchdirs); } return NULL; } cnt = DEFAULT_SEARCHDIRS; if ((dirp = opendir("/usr/src"))) { for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) cnt++; if ((searchdirs = 
(char **)malloc(cnt * sizeof(char *))) == NULL) { console("/usr/src/ directory list malloc failed: %s\n", strerror(errno)); closedir(dirp); return default_searchdirs; } for (i = 0; i < DEFAULT_SEARCHDIRS; i++) searchdirs[i] = default_searchdirs[i]; cnt = DEFAULT_SEARCHDIRS; rewinddir(dirp); for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (STREQ(dp->d_name, "linux") || STREQ(dp->d_name, ".") || STREQ(dp->d_name, "..")) continue; sprintf(dirbuf, "/usr/src/%s", dp->d_name); if (daemon_mount_point(dirbuf)) continue; if (!daemon_is_directory(dirbuf)) continue; if ((searchdirs[cnt] = (char *) malloc(strlen(dirbuf)+2)) == NULL) { console("/usr/src/ directory entry malloc failed: %s\n", strerror(errno)); break; } sprintf(searchdirs[cnt], "%s/", dirbuf); cnt++; } searchdirs[cnt] = NULL; closedir(dirp); } for (i = 0; searchdirs[i]; i++) console("searchdirs[%d]: %s\n", i, searchdirs[i]); return searchdirs; } /* * Determine whether a file is a mount point, without the benefit of stat(). * This horrendous kludge is necessary to avoid uninterruptible stat() or * fstat() calls on nfs mount-points where the remote directory is no longer * available. */ static int daemon_mount_point(char *name) { int i; static int mount_points_gathered = -1; static char **mount_points; char *arglist[MAXARGS]; char buf[BUFSIZE]; char cmd[BUFSIZE]; int argc, found; struct stat sbuf; FILE *pipe; /* * The first time through, stash a list of mount points. 
*/ if (mount_points_gathered < 0) { found = mount_points_gathered = 0; if (stat("/proc/mounts", &sbuf) == 0) sprintf(cmd, "/bin/cat /proc/mounts"); else if (stat("/etc/mtab", &sbuf) == 0) sprintf(cmd, "/bin/cat /etc/mtab"); else return FALSE; if ((pipe = popen(cmd, "r")) == NULL) return FALSE; while (fgets(buf, BUFSIZE, pipe)) { argc = daemon_parse_line(buf, arglist); if (argc < 2) continue; found++; } pclose(pipe); if (!(mount_points = (char **)malloc(sizeof(char *) * found))) return FALSE; if ((pipe = popen(cmd, "r")) == NULL) return FALSE; i = 0; while (fgets(buf, BUFSIZE, pipe) && (mount_points_gathered < found)) { argc = daemon_parse_line(buf, arglist); if (argc < 2) continue; if ((mount_points[i] = (char *) malloc(strlen(arglist[1])*2))) { strcpy(mount_points[i], arglist[1]); mount_points_gathered++, i++; } } pclose(pipe); } /* * A null name string means we're done with this routine forever, * so the malloc'd memory can be freed. */ if (!name) { for (i = 0; i < mount_points_gathered; i++) free(mount_points[i]); free(mount_points); return FALSE; } for (i = 0; i < mount_points_gathered; i++) { if (STREQ(name, mount_points[i])) return TRUE; } return FALSE; } /* * Check whether a file is a directory. */ static int daemon_is_directory(char *file) { struct stat sbuf; if (!file || !strlen(file)) return(FALSE); if (stat(file, &sbuf) == -1) return(FALSE); /* This file doesn't exist. */ return((sbuf.st_mode & S_IFMT) == S_IFDIR ? TRUE : FALSE); } /* * Check whether a file is readable. */ static int daemon_file_readable(char *file) { struct stat sbuf; long tmp; int fd; if (stat(file, &sbuf) < 0) return FALSE; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; if (read(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) { close(fd); return FALSE; } close(fd); return TRUE; } /* * Parse a line into tokens, populate the passed-in argv[] array, and return * the count of arguments found. This function modifies the passed-string * by inserting a NULL character at the end of each token. 
Expressions
 * encompassed by parentheses, and strings encompassed by apostrophes, are
 * collected into single tokens.
 */
int
daemon_parse_line(char *str, char *argv[])
{
    int i, j;
    int string;      /* inside a double-quoted token */
    int expression;  /* inside a parenthesized token */

    for (i = 0; i < MAXARGS; i++)
        argv[i] = NULL;

    daemon_clean_line(str);

    if (str == NULL || strlen(str) == 0)
        return(0);

    i = j = 0;
    string = expression = FALSE;
    argv[j++] = str;

    while (TRUE) {
        if (j == MAXARGS) {
            console("too many arguments in string!\n");
            return 0;
        }

        /* advance to the end of the current token */
        while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) {
            i++;
        }

        switch (str[i])
        {
        case ' ':
        case '\t':
            /* terminate the token just scanned */
            str[i++] = NULLCHAR;

            if (str[i] == '"') {
                /* blank out the opening quote; gather the whole
                 * quoted span as one token below */
                str[i] = ' ';
                string = TRUE;
                i++;
            }

            if (str[i] == '(') {
                expression = TRUE;
            }

            /* skip inter-token whitespace */
            while (str[i] == ' ' || str[i] == '\t') {
                i++;
            }

            if (str[i] != NULLCHAR && str[i] != '\n') {
                argv[j++] = &str[i];

                if (string) {
                    string = FALSE;
                    while (str[i] != '"' && str[i] != NULLCHAR)
                        i++;
                    if (str[i] == '"')
                        str[i] = ' ';
                }
                if (expression) {
                    expression = FALSE;
                    while (str[i] != ')' && str[i] != NULLCHAR)
                        i++;
                }
                break;
            }
            /* else fall through */
        case '\n':
            str[i] = NULLCHAR;
            /* keep falling... */
        case NULLCHAR:
            /* NOTE(review): NULLCHAR assigned to a char * slot --
             * works because it's the constant 0, but NULL would be
             * the accurate terminator here */
            argv[j] = NULLCHAR;
            return(j);
        }
    }
}

/*
 * Strip line-beginning and line-ending whitespace and linefeeds.
 */
char *
strip_linefeeds(char *line)
{
    return(daemon_clean_line(line));
}

static char *
daemon_clean_line(char *line)
{
    char buf[BUFSIZE];
    char *p;

    if (line == NULL || strlen(line) == 0)
        return(line);

    /* shift the string left past any leading whitespace */
    strcpy(buf, line);
    p = &buf[0];
    while (*p == ' ' || *p == '\t')
        p++;
    strcpy(line, p);

    if (line == NULL || strlen(line) == 0)
        return(line);

    /* strip a trailing newline (loop exits after the first, since the
     * overwritten character becomes NULLCHAR) */
    p = &LASTCHAR(line);
    while (*p == '\n')
        *p = NULLCHAR;

    if (line == NULL || strlen(line) == 0)
        return(line);

    /* strip trailing spaces/tabs, walking backwards */
    p = &LASTCHAR(line);
    while (*p == ' ' || *p == '\t') {
        *p = NULLCHAR;
        if (p == line)
            break;
        p--;
    }

    return(line);
}

/*
 * Service not offered by the daemon.
*/ int monitor_memory(long *a1, long *a2, long *a3, long *a4) { return FALSE; } static int daemon_find_module(char *release, char *filename, char *retbuf) { char dir[BUFSIZE]; int found; found = FALSE; sprintf(dir, "%s/%s", DEFAULT_REDHAT_DEBUG_LOCATION, release); found = daemon_search_directory_tree(dir, filename, retbuf); if (!found) { sprintf(dir, "/lib/modules/%s", release); found = daemon_search_directory_tree(dir, filename, retbuf); } return found; } int daemon_search_directory_tree(char *directory, char *file, char *retbuf) { char command[BUFSIZE]; char buf[BUFSIZE]; FILE *pipe; int found; if (!daemon_file_exists("/usr/bin/find", NULL) || !daemon_file_exists("/bin/echo", NULL) || !daemon_is_directory(directory)) return FALSE; sprintf(command, "/usr/bin/find %s -name %s -print; /bin/echo search done", directory, file); if ((pipe = popen(command, "r")) == NULL) return FALSE; found = FALSE; while (fgets(buf, BUFSIZE-1, pipe) || !found) { if (STREQ(buf, "search done\n")) break; if (!found && STREQ((char *)basename(strip_linefeeds(buf)), file)) { strcpy(retbuf, buf); found = TRUE; } } pclose(pipe); return found; } static int daemon_file_exists(char *file, struct stat *sp) { struct stat sbuf; if (stat(file, sp ? 
sp : &sbuf) == 0) return TRUE; return FALSE; } static int daemon_checksum(char *file, long *retsum) { int i; int fd; ssize_t cnt; char buf[MIN_PAGE_SIZE]; long csum; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; csum = 0; BZERO(buf, MIN_PAGE_SIZE); while ((cnt = read(fd, buf, MIN_PAGE_SIZE)) > 0) { for (i = 0; i < cnt; i++) csum += buf[i]; BZERO(buf, MIN_PAGE_SIZE); } close(fd); *retsum = csum; return TRUE; } #else static void copy_to_local_namelist(struct remote_file *); static char *create_local_namelist(struct remote_file *); static int remote_find_booted_kernel(struct remote_file *); static int remote_proc_version(char *); static int validate_phys_base(physaddr_t, physaddr_t, physaddr_t); static int remote_file_open(struct remote_file *); static int remote_file_close(struct remote_file *); static int identical_namelist(char *, struct remote_file *); void remote_socket_options(int); static int copy_remote_file(struct remote_file *, int, char *, char *); static void copy_remote_gzip_file(struct remote_file *, char *, char *); static int remote_file_checksum(struct remote_file *); static int remote_file_type(char *); static int remote_lkcd_dump_init(void); static int remote_s390_dump_init(void); static int remote_netdump_init(void); static int remote_tcp_read(int, const char *, size_t); static int remote_tcp_read_string(int, const char *, size_t, int); static int remote_tcp_write(int, const void *, size_t); static int remote_tcp_write_string(int, const char *); struct _remote_context { uint flags; int n_cpus; int vfd; char remote_type[10]; } remote_context; #define NIL_FLAG (0x01U) #define NIL_MODE() (rc->flags & NIL_FLAG) struct _remote_context *rc = &remote_context; /* * Parse, verify and establish a connection with the network daemon * specified on the crash command line. 
 *
 * The format is: [remote-hostname]:port[,remote-namelist][,remote-dumpfile]
 *
 * where everything but the port number is optional, and the remote-namelist
 * and remote-dumpfile can be reversed.
 *
 *  1. The default remote host is the local host.
 *  2. The default dumpfile is /dev/mem.
 *  3. If no remote-namelist and remote-dumpfile are given, the daemon
 *     is queried for a kernel that matches the remote /proc/version.
 *     If no local kernel namelist is entered, the remote version will
 *     be copied locally when fd_init() is called.
 *  4. If a remote-dumpfile is given with no remote namelist, it is presumed
 *     that the kernel namelist will be entered locally.
 */
int
is_remote_daemon(char *dp)
{
    char *p1;
    static char defaulthost[MAXHOSTNAMELEN+1];
    char sendbuf[BUFSIZE];
    char recvbuf[BUFSIZE];
    char *portp, *filep, *file1, *file2;
    struct hostent *hp;
    struct sockaddr_in serv_addr;

    /* a local file containing ':' is a file, not a daemon spec */
    if (!strstr(dp, ":") || file_exists(dp, NULL))
        return FALSE;

    pc->port = 0;
    pc->server = pc->server_memsrc = NULL;
    rc->vfd = pc->rmfd = pc->rkfd = -1;
    file1 = file2 = NULL;

    /* split off the optional ",file[,file]" tail */
    if ((filep = strstr(dp, ","))) {
        *filep = NULLCHAR;
        filep++;
    }

    if (*dp == ':') {
        /* ":port" with no host: default to the local hostname */
        BZERO(defaulthost, MAXHOSTNAMELEN+1);
        gethostname(defaulthost, MAXHOSTNAMELEN);
        pc->server = defaulthost;
        portp = dp+1;
    } else {
        pc->server = strtok(dp, ":");
        portp = strtok(NULL, ":");
    }

    if (portp == NULL)
        return FALSE;

    if (decimal(portp, 0))
        pc->port = (ushort)atoi(portp);
    else
        return FALSE;

    if (filep) {
        file1 = strtok(filep, ",");
        file2 = strtok(NULL, ",");
    }

    if (!pc->server || !pc->port)
        return FALSE;

    if (CRASHDEBUG(1)) {
        fprintf(fp, "server: [%s]\n", pc->server);
        fprintf(fp, "  port: [%d]\n", pc->port);
        /* NOTE(review): file1/file2 may be NULL here; glibc prints
         * "(null)" but %s with NULL is formally undefined -- confirm */
        fprintf(fp, " file1: [%s]\n", file1);
        fprintf(fp, " file2: [%s]\n", file2);
    }

    if ((hp = gethostbyname(pc->server)) == NULL) {
        herror(pc->server);
        error(FATAL, "gethostbyname [%s] failed\n", pc->server);
    }

    if (CRASHDEBUG(1)) {
        struct in_addr *ip;
        char **listptr;

        listptr = hp->h_addr_list;
        while ((ip = (struct in_addr *) *listptr++) != NULL)
            printf("%s\n", inet_ntoa(*ip));
    }

    if ((pc->sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        perror("socket");
        error(FATAL, "socket call failed\n");
    }

    BZERO((char *)&serv_addr, sizeof(struct sockaddr_in));
    serv_addr.sin_family = AF_INET;
    BCOPY(hp->h_addr, (char *)&serv_addr.sin_addr, hp->h_length);
    serv_addr.sin_port = htons(pc->port);

    if (connect(pc->sockfd, (struct sockaddr *)&serv_addr,
        sizeof(struct sockaddr_in)) < 0) {
        herror(hp->h_name);
        error(FATAL, "connect [%s:%d] failed\n", hp->h_name, pc->port);
        clean_exit(1);
    }

    if (CRASHDEBUG(1))
        printf("connect [%s:%d]: success\n", hp->h_name, pc->port);

    remote_socket_options(pc->sockfd);

    /*
     * Try and use NIL mode.
     * NOTE(review): strstr(recvbuf, "") always succeeds, so this
     * branch can never be taken as written -- the literal appears to
     * have lost a "<FAIL>"-style marker during extraction; verify
     * against pristine sources.
     */
    BZERO(sendbuf, BUFSIZE);
    BZERO(recvbuf, BUFSIZE);
    sprintf(sendbuf, "NIL");
    remote_tcp_write_string(pc->sockfd, sendbuf);
    remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, 0);
    if (!strstr(recvbuf, "")) {
        rc->flags |= NIL_FLAG;
        p1 = strtok(recvbuf, " ");   /* NIL */
        p1 = strtok(NULL, " ");      /* remote type */
        if (p1 && p1[0] != 'L')
            pc->flags2 |= REM_PAUSED_F;
    }

    /*
     * Get the remote machine type and verify a match.  The daemon pid
     * is also used as a live system initial context.
     */
    BZERO(sendbuf, BUFSIZE);
    BZERO(recvbuf, BUFSIZE);
    sprintf(sendbuf, "MACHINE_PID");
    remote_tcp_write_string(pc->sockfd, sendbuf);
    remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());
    p1 = strtok(recvbuf, " ");   /* MACHINE */
    p1 = strtok(NULL, " ");      /* machine type */
    if (CRASHDEBUG(1))
        printf("remote MACHINE: %s\n", p1);
    if (!STREQ(pc->machine_type, p1))
        error(FATAL,
            "machine type mismatch: local: %s remote: %s\n",
            pc->machine_type, p1);
    p1 = strtok(NULL, " ");      /* pid */
    pc->server_pid = atol(p1);

    /* classify the optional files: at most one namelist (ELF) and one
     * memory source (dump flavor or /dev/mem), in either order */
    if (file1) {
        switch (remote_file_type(file1))
        {
        case TYPE_ELF:
            pc->server_namelist = file1;
            break;
        case TYPE_NETDUMP:
            pc->server_memsrc = file1;
            pc->flags |= REM_NETDUMP;
            break;
        case TYPE_MCLXCD:
            pc->server_memsrc = file1;
            pc->flags |= REM_MCLXCD;
            break;
        case TYPE_DEVMEM:
            pc->server_memsrc = file1;
            break;
        case TYPE_LKCD:
            pc->server_memsrc = file1;
            pc->flags |= REM_LKCD;
            break;
        case TYPE_S390D:
            pc->server_memsrc = file1;
            pc->flags |= REM_S390D;
            break;
        }
    }

    if (file2) {
        switch (remote_file_type(file2))
        {
        case TYPE_ELF:
            if (pc->server_namelist)
                error(FATAL,
                    "two remote namelists entered: %s and %s\n",
                    file1, file2);
            pc->server_namelist = file2;
            break;
        case TYPE_NETDUMP:
            if (pc->server_memsrc)
                error(FATAL,
                    "neither %s or %s is an ELF file\n",
                    file1, file2);
            pc->server_memsrc = file2;
            pc->flags |= REM_NETDUMP;
            break;
        case TYPE_MCLXCD:
            if (pc->server_memsrc)
                error(FATAL,
                    "neither %s or %s is an ELF file\n",
                    file1, file2);
            pc->server_memsrc = file2;
            pc->flags |= REM_MCLXCD;
            break;
        case TYPE_LKCD:
            if (pc->server_memsrc)
                error(FATAL,
                    "neither %s or %s is an ELF file\n",
                    file1, file2);
            pc->server_memsrc = file2;
            pc->flags |= REM_LKCD;
            break;
        case TYPE_S390D:
            if (pc->server_memsrc)
                error(FATAL,
                    "neither %s or %s is an ELF file\n",
                    file1, file2);
            pc->server_memsrc = file2;
            pc->flags |= REM_S390D;
            break;
        case TYPE_DEVMEM:
            pc->server_memsrc = file2;
            break;
        }
    }

    return TRUE;
}

/*
 * Determine whether a file is a kernel or a memory source.
 */

/*
 * Ask the daemon to classify a file ("TYPE <file>") and translate the
 * reply into one of the local TYPE_* constants.  Unknown or unsupported
 * replies are fatal.
 */
static int
remote_file_type(char *file)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "TYPE %s", file);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());
	/*
	 * NOTE(review): strstr(recvbuf, "") always matches, so as written
	 * this first branch always fires; the failure-sentinel string
	 * appears to have been lost in transcription -- confirm against
	 * the original source.
	 */
	if (strstr(recvbuf, ""))
		error(FATAL, "invalid remote file name: %s\n", file);
	else if (strstr(recvbuf, " UNSUPPORTED"))
		error(FATAL, "unsupported remote file type: %s\n", file);
	else if (strstr(recvbuf, " NETDUMP"))
		return TYPE_NETDUMP;
	else if (strstr(recvbuf, " ELF"))
		return TYPE_ELF;
	else if (strstr(recvbuf, " MCLXCD"))
		return TYPE_MCLXCD;
	else if (strstr(recvbuf, " DEVMEM"))
		return TYPE_DEVMEM;
	else if (strstr(recvbuf, " LKCD"))
		return TYPE_LKCD;
	else if (strstr(recvbuf, " S390D"))
		return TYPE_S390D;

	return (error(FATAL, "unknown remote file type: %s\n", file));
}

/*
 * Try to set the receive buffer size to READBUFSIZE with setsockopt(),
 * storing the value returned by getsockopt() after the attempt is made.
 * Then enforce a SO_RCVLOWAT (low water mark) of 1, to ensure that error
 * recovery won't get hung in the recv() call in remote_clear_pipeline().
 */
void
remote_socket_options(int sockfd)
{
	int rcvbuf, optlen;

	/* Remember the requested size; transfers are chunked by pc->rcvbufsize. */
	pc->rcvbufsize = rcvbuf = READBUFSIZE;

	if (setsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf,
	    sizeof(rcvbuf)) < 0) {
		error(INFO, "SO_RCVBUF setsockopt error\n");
		return;
	}

	optlen = sizeof(rcvbuf);
	if (getsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf,
	    (socklen_t *)&optlen) < 0) {
		error(INFO, "SO_RCVBUF getsockopt error\n");
		return;
	}

	if (CRASHDEBUG(1))
		printf("socket SO_RCVBUF size: %d\n", rcvbuf);

	rcvbuf = 1;

	if (setsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, (char *)&rcvbuf,
	    sizeof(rcvbuf)) < 0) {
		/*
		 * Earlier versions of Linux TCP won't accept this option,
		 * which is hardcoded to the desired count of 1 anyway.
		 * Set it to 0, and verify it as 1 in the getsockopt() call.
		 */
		if (CRASHDEBUG(1))
			error(INFO, "SO_RCVLOWAT setsockopt error: %s\n",
				strerror(errno));
		rcvbuf = 0;
	}

	optlen = sizeof(rcvbuf);
	if (getsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, (char *)&rcvbuf,
	    (socklen_t *)&optlen) < 0) {
		error(INFO, "SO_RCVLOWAT getsockopt error\n");
		return;
	}

	if (CRASHDEBUG(1) || (rcvbuf != 1))
		error(INFO, "socket SO_RCVLOWAT value: %d\n", rcvbuf);
}

/*
 * Wrapper around recv to read full length packet.
 *
 * NOTE(review): the buffer is declared const but written through a
 * cast -- the parameter should really be a plain "void *".  The int
 * return also narrows the size_t total on LP64; confirm callers only
 * use it as a <=0 failure test or with bounded request sizes.
 */
static int
remote_tcp_read(int sock, const char *pv_buffer, size_t cb_buffer)
{
	size_t cb_total = 0;

	do {
		ssize_t cb_read = recv(sock, (void*)pv_buffer, cb_buffer,
			MSG_NOSIGNAL);
		if (cb_read <= 0)
			return cb_read;		/* error or orderly shutdown */
		cb_total += cb_read;
		cb_buffer -= cb_read;
		pv_buffer = (char *)pv_buffer + cb_read;
	} while (cb_buffer);

	return cb_total;
}

/*
 * Wrapper around recv to read full string packet.  In non-NIL mode the
 * read completes after at least 4 bytes arrive; otherwise it completes
 * when the last byte of a chunk is the NUL terminator, or the buffer
 * fills.  Same const-cast and int-return caveats as remote_tcp_read().
 */
static int
remote_tcp_read_string(int sock, const char *pv_buffer, size_t cb_buffer,
	int nil_mode)
{
	size_t cb_total = 0;

	do {
		ssize_t cb_read = recv(sock, (void*)pv_buffer, cb_buffer,
			MSG_NOSIGNAL);
		if (cb_read <= 0)
			return cb_read;
		cb_total += cb_read;
		if (!nil_mode && cb_total >= 4)
			return cb_total;
		if (!pv_buffer[cb_read - 1])	/* chunk ends in NUL: done */
			return cb_total;
		cb_buffer -= cb_read;
		pv_buffer = (char *)pv_buffer + cb_read;
	} while (cb_buffer);

	return cb_total;
}

/*
 * Wrapper around send to send full packet, looping over partial sends.
 * Returns 0 on success, 1 on any send() failure.
 */
static int
remote_tcp_write(int sock, const void *pv_buffer, size_t cb_buffer)
{
	do {
		size_t cb_now = cb_buffer;
		ssize_t cb_written = send(sock, (const char *)pv_buffer,
			cb_now, MSG_NOSIGNAL);
		if (cb_written < 0)
			return 1;
		cb_buffer -= cb_written;
		pv_buffer = (char *)pv_buffer + cb_written;
	} while (cb_buffer);

	return 0;
}

/*
 * Wrapper around tcp_write to send a NUL-terminated string, terminator
 * included.
 */
static int
remote_tcp_write_string(int sock, const char *pv_buffer)
{
	return remote_tcp_write(sock, pv_buffer, strlen(pv_buffer) + 1);
}

/*
 * Request that the daemon open a file.
*/ static int remote_file_open(struct remote_file *rfp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "OPEN %s", rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (CRASHDEBUG(1)) fprintf(fp, "remote_file_open: [%s]\n", recvbuf); if (strstr(recvbuf, "O_RDWR") || strstr(recvbuf, "O_RDONLY")) { p1 = strtok(recvbuf, " "); /* OPEN */ p1 = strtok(NULL, " "); /* filename */ p1 = strtok(NULL, " "); /* fd */ rfp->fd = atoi(p1); p1 = strtok(NULL, " "); /* flags */ if (STREQ(p1, "O_RDWR")) rfp->flags |= O_RDWR; else if (STREQ(p1, "O_RDONLY")) rfp->flags |= O_RDONLY; p1 = strtok(NULL, " "); /* size */ rfp->size = atoi(p1); return TRUE; } else return FALSE; } /* * Request that the daemon close a previously-opened file. */ static int remote_file_close(struct remote_file *rfp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "CLOSE %d", rfp->fd); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); return (strstr(recvbuf, "OK") ? TRUE : FALSE); } /* * Get a copy of the daemon machine's /proc/version */ static int remote_proc_version(char *buf) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "PROC_VERSION"); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (STREQ(recvbuf, "")) { buf[0] = 0; return FALSE; } strcpy(buf, recvbuf); return TRUE; } /* * Check that virt_phys_base when accessed via * phys_base - text_start is phys_base. 
 */

/*
 * Sanity-check a candidate phys_base: read the location
 * (virt_phys_base + phys_base - text_start) from the remote memory
 * source and verify that the value stored there equals phys_base.
 */
static int
validate_phys_base(physaddr_t phys_base, physaddr_t text_start, physaddr_t virt_phys_base)
{
	ulong value;

	if (CRASHDEBUG(3))
		fprintf(fp, "validate_phys_base: virt_phys_base=0x%llx phys_base=0x%llx text_start=0x%llx calc=0x%llx\n",
			(long long unsigned int)virt_phys_base,
			(long long unsigned int)phys_base,
			(long long unsigned int)text_start,
			(long long unsigned int)virt_phys_base + phys_base - text_start);

	if (READMEM(pc->rmfd, (void*)&value, sizeof(value), virt_phys_base,
	    virt_phys_base + phys_base - text_start) == sizeof(value)) {
		if (value == phys_base)
			return 1;
	}
	return 0;
}

/*
 * Get remote phys_base based on virtual address of "phys_base":
 * open the daemon's /dev/vmem once (cached in rc->vfd), then probe
 * each vcpu until a value validates.  Returns 0 on failure.
 */
physaddr_t
get_remote_phys_base(physaddr_t text_start, physaddr_t virt_phys_base)
{
	int vcpu;
	ulong value;

	if (rc->vfd < 0) {
		struct remote_file remote_file, *rfp;
		rfp = &remote_file;
		BZERO(rfp, sizeof(struct remote_file));
		rfp->filename = "/dev/vmem";
		if (remote_file_open(rfp)) {
			rc->vfd = rfp->fd;
		} else
			return 0;
	}

	for (vcpu = 0; vcpu < rc->n_cpus; vcpu++)
		if (remote_memory_read(rc->vfd, (void*)&value, sizeof(value),
		    virt_phys_base, vcpu) == sizeof(value)) {
			if (validate_phys_base(value, text_start, virt_phys_base))
				return value;
		}

	return 0;
}

/*
 * Do a remote VTOP if supported.  Only meaningful for "special" remote
 * types (rc->remote_type set by remote_page_size()); returns 0 otherwise
 * or when the reply does not parse as a hex address.
 */
physaddr_t
remote_vtop(int cpu, physaddr_t virt_addr)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1;
	int errflag;
	ulong value;

	if (!rc->remote_type[0])
		return 0;	/* Not a special remote. */

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "VTOP %d %llx", cpu, (long long unsigned int)virt_addr);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	if (CRASHDEBUG(2))
		fprintf(fp, "remote_vtop: [%s]\n", recvbuf);

	/*
	 * NOTE(review): strstr(recvbuf, "") always matches as written, so
	 * this FATAL fires unconditionally; the failure-sentinel string
	 * appears lost in transcription -- confirm against the original.
	 */
	if (strstr(recvbuf, ""))
		error(FATAL, "remote_vtop for CPU %d\n", cpu);

	p1 = strtok(recvbuf, " ");	/* VTOP */
	p1 = strtok(NULL, " ");		/* cpu */
	p1 = strtok(NULL, " ");		/* vaddr */
	p1 = strtok(NULL, " ");		/* paddr */

	/* NOTE(review): p1 may be NULL on a short reply -- htol() would
	 * then be handed NULL; verify htol's NULL behavior. */
	errflag = 0;
	value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag);
	if (!errflag) {
		return value;
	}
	return 0;
}

/*
 * Get a copy of the daemon machine cpu regs ("FETCH_LIVE_IP_SP_BP").
 * Parses "cs:ip ss:sp" out of the reply into *eip and *esp.
 * Returns 1 on success, 0 on any transport or parse failure.
 */
int
get_remote_regs(struct bt_info *bt, ulong *eip, ulong *esp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1, *p2;
	int errflag;
	ulong value;

	if (!rc->remote_type[0])
		return 0;	/* Not a special remote. */

	*eip = 0;
	*esp = 0;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "FETCH_LIVE_IP_SP_BP %d", bt->tc->processor);
	if (remote_tcp_write_string(pc->sockfd, sendbuf))
		return 0;
	errflag =
	    remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());
	if (errflag <= 0)
		return 0;

	if (CRASHDEBUG(1))
		fprintf(fp, "get_remote_regs(cpu=%d): [%s]\n",
			bt->tc->processor, recvbuf);

	/*
	 * NOTE(review): same always-true strstr(recvbuf, "") as above --
	 * as written this makes the function always return 0; sentinel
	 * string presumed lost in transcription.
	 */
	if (strstr(recvbuf, "")) {
		error(INFO, "get_remote_regs for CPU %d\n", bt->tc->processor);
		return 0;
	}

	p1 = strtok(recvbuf, " ");	/* FETCH_LIVE_IP_SP_BP */
	p1 = strtok(NULL, " ");		/* cpu */
	p1 = strtok(NULL, ":");		/* cs */
	p1 = strtok(NULL, " ");		/* ip */
	p2 = strtok(NULL, ":");		/* ss */
	p2 = strtok(NULL, " ");		/* sp */
	/* p2 = strtok(NULL, " ");	bp */

	errflag = 0;
	value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag);
	if (!errflag) {
		*eip = value;
	}

	errflag = 0;
	value = htol(p2, RETURN_ON_ERROR|QUIET, &errflag);
	if (!errflag) {
		*esp = value;
	}

	return 1;
}

/*
 * Get a remote cr3 if supported.
 */

/*
 * Get a remote cr3 if supported ("FETCH_LIVE_CR3").  Only meaningful
 * for special remote types; returns 0 otherwise or on parse failure.
 */
physaddr_t
get_remote_cr3(int cpu)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1;
	int errflag;
	ulong value;

	if (!rc->remote_type[0])
		return 0;	/* Not a special remote. */

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "FETCH_LIVE_CR3 %d", cpu);
	if (remote_tcp_write_string(pc->sockfd, sendbuf))
		return 0;
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	if (CRASHDEBUG(1))
		fprintf(fp, "get_remote_cr3: [%s]\n", recvbuf);

	/* NOTE(review): always-true strstr(recvbuf, "") -- sentinel string
	 * presumed lost in transcription; confirm against the original. */
	if (strstr(recvbuf, ""))
		error(FATAL, "get_remote_cr3 for CPU %d\n", cpu);

	p1 = strtok(recvbuf, " ");	/* FETCH_LIVE_CR3 */
	p1 = strtok(NULL, " ");		/* cpu */
	p1 = strtok(NULL, " ");		/* cr3 */

	errflag = 0;
	value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag);
	if (!errflag)
		return value;
	return 0;
}

/*
 *
 *  Set up the file descriptors and file name strings if they haven't
 *  been set up before:
 *
 *   1. pc->namelist must be set to a local kernel namelist, which will be
 *      copied from the remote machine if it was not specified.
 *
 *   2. pc->dumpfile will never be set for a remote operation, because there
 *      is no difference to readmem().
 *
 *   3. pc->server_namelist may be set if it has to be copied across.
 *
 *   4. pc->server_memsrc will be set to either /dev/mem or the dumpfile.
 */
void
remote_fd_init(void)
{
	char filename[BUFSIZE];
	struct remote_file remote_file, *rfp;

	rfp = &remote_file;

	/* Reject ambiguous namelist combinations up front. */
	if (pc->namelist && pc->server_namelist) {
		error(INFO, "too many namelists\n");
		program_usage(SHORT_FORM);
	}

	if ((pc->namelist || pc->server_namelist) &&
	    pc->namelist_debug && pc->system_map) {
		error(INFO, "too many namelist options:\n    %s\n    %s\n    %s\n",
			pc->namelist ? pc->namelist : pc->server_namelist,
			pc->namelist_debug, pc->system_map);
		program_usage(SHORT_FORM);
	}

	/*
	 * Account for the remote possibility of a local dumpfile
	 * being entered on the command line.
	 */
	if (pc->flags & MEMORY_SOURCES) {
		if (pc->server_memsrc) {
			error(INFO, "too many dumpfile/memory arguments\n");
			program_usage(SHORT_FORM);
		}
		pc->flags |= MEMSRC_LOCAL;
		if (pc->flags & (DEVMEM|MEMMOD)) {
			if (!get_proc_version())
				error(INFO, "/proc/version: %s\n",
					strerror(errno));
			pc->flags |= LIVE_SYSTEM;
		}
	} else {
		/*
		 * First open the remote memory source, defaulting to /dev/mem
		 * if no remote dumpfile name was entered.  If it is /dev/mem,
		 * then also go get the remote /proc/version.
		 */
		pc->readmem = read_daemon;

		if (!pc->server_memsrc)
			pc->server_memsrc = "/dev/mem";
		if (STREQ(pc->server_memsrc, "/dev/mem"))
			pc->flags |= REM_LIVE_SYSTEM;
		BZERO(rfp, sizeof(struct remote_file));
		rfp->filename = pc->server_memsrc;
		if (remote_file_open(rfp)) {
			pc->rmfd = rfp->fd;
			if (rfp->flags & O_RDWR)
				pc->flags |= MFD_RDWR;
			/* 32-bit live remotes also need /dev/kmem access. */
			if (BITS32() && REMOTE_ACTIVE()) {
				BZERO(rfp, sizeof(struct remote_file));
				rfp->filename = "/dev/kmem";
				if (remote_file_open(rfp))
					pc->rkfd = rfp->fd;
			}
			if ((pc->flags & REM_NETDUMP) &&
			    !remote_netdump_init())
				error(FATAL,
				    "%s: remote initialization failed\n",
					pc->server_memsrc);
			if ((pc->flags & REM_LKCD) &&
			    !remote_lkcd_dump_init())
				error(FATAL,
				    "%s: remote initialization failed\n",
					pc->server_memsrc);
			if ((pc->flags & REM_S390D) &&
			    !remote_s390_dump_init())
				error(FATAL,
				    "%s: remote initialization failed\n",
					pc->server_memsrc);
			if (REMOTE_DUMPFILE())
				pc->writemem = write_daemon;
		} else
			error(FATAL, "cannot open remote memory source: %s\n",
				pc->server_memsrc);

		if (REMOTE_ACTIVE() && !remote_proc_version(kt->proc_version))
			error(WARNING,
			    "daemon cannot access /proc/version\n\n");
	}

	/*
	 * If a local namelist was entered, check whether it's readable.
	 * If a server namelist was entered, copy it across.
	 * If no server namelist was entered, query the daemon for it,
	 * and if found, copy it across,
	 */
	if (pc->namelist) {
		if ((pc->nfd = open(pc->namelist, O_RDONLY)) < 0)
			error(FATAL, "%s: %s\n", pc->namelist,
				strerror(errno));
		close(pc->nfd);
		pc->nfd = -1;
		pc->flags |= NAMELIST_LOCAL;
	} else if (pc->server_namelist) {
		BZERO(rfp, sizeof(struct remote_file));
		rfp->filename = pc->server_namelist;
		if (!remote_file_open(rfp)) {
			error(FATAL, "daemon cannot open: %s\n",
				pc->server_namelist);
		}
		copy_to_local_namelist(rfp);
		remote_file_close(rfp);
	} else {
		BZERO(rfp, sizeof(struct remote_file));
		BZERO(filename, BUFSIZE);
		rfp->filename = filename;
		if (!remote_find_booted_kernel(rfp))
			error(FATAL,
			    "remote daemon cannot find booted kernel\n");
		if (!remote_file_open(rfp))
			error(FATAL, "remote daemon cannot open: %s\n",
				pc->server_namelist);
		copy_to_local_namelist(rfp);
		remote_file_close(rfp);
	}

	if (REMOTE_ACTIVE())
		pc->flags |= LIVE_SYSTEM;
}

/*
 * Copy a remote kernel to a local file, which gets unlinked in the normal
 * course of events.  However, the pc->nfd file descriptor will be kept
 * alive in case there's a command put in place to keep the file around.
 */
static void
copy_to_local_namelist(struct remote_file *rfp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char readbuf[READBUFSIZE];
	int tty;

	if (pc->flags & KERNEL_DEBUG_QUERY) {
		/*
		 * Don't bother copying the kernel if the daemon can
		 * figure it out.
		 */
		BZERO(sendbuf, BUFSIZE);
		BZERO(recvbuf, BUFSIZE);
		sprintf(sendbuf, "DEBUGGING_SYMBOLS %s", rfp->filename);
		remote_tcp_write_string(pc->sockfd, sendbuf);
		remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1,
			NIL_MODE());
		if (strstr(recvbuf, "NO_DEBUG")) {
			sprintf(readbuf, "%s@%s", rfp->filename, pc->server);
			pc->namelist = readbuf;
			no_debugging_data(FATAL);
		}
	}

	pc->namelist = create_local_namelist(rfp);

	/* An identical local copy already exists: nothing to transfer. */
	if (pc->flags & NAMELIST_LOCAL)
		return;

	if ((pc->nfd = open(pc->namelist, O_RDWR|O_CREAT|O_TRUNC,
	    S_IRWXU)) < 0) {
		pc->flags &= ~UNLINK_NAMELIST;
		error(FATAL, "cannot create local copy of kernel (%s)\n",
			pc->namelist);
	}

	tty = !(pc->flags & SILENT) && isatty(fileno(stdin));

	if (!(pc->flags & NAMELIST_NO_GZIP)) {
		copy_remote_gzip_file(rfp, pc->namelist, tty ?
		    "please wait... (copying remote kernel namelist: " : NULL);
		if (tty)
			fprintf(stderr,
			"\r                                                                       \r");
		return;
	}

	if (copy_remote_file(rfp, pc->nfd, pc->namelist, tty ?
	    "please wait... (copying remote kernel namelist: " : NULL)) {
		if (tty)
			fprintf(stderr,
			"\r                                                                       \r");
	} else
		error(FATAL,
		    "write to local copy of kernel namelist failed\n");
}

/*
 * Try to create a file of the format: vmlinux@@hostname
 * If it already exists, append "_0", "_1", etc. until one's not found.
 *
 * The file will be unlinked by display_sys_stats() the first time it's
 * called.
 */

/*
 * Build the local copy's name ("<basename>@<server>", suffixed _0, _1...
 * on collision).  If an existing local file is byte-identical to the
 * remote kernel, NAMELIST_LOCAL is set and no copy will be made;
 * otherwise UNLINK_NAMELIST marks the copy as temporary.  Returns a
 * malloc'd name owned by the caller (stored in pc->namelist).
 */
static char *
create_local_namelist(struct remote_file *rfp)
{
	char buf[BUFSIZE];
	char *p1;
	int i, use_local_copy;

	p1 = (char *)basename(rfp->filename);
	sprintf(buf, "%s@%s", p1, pc->server);

	/* NOTE(review): loop guard "i >= 0" is effectively unbounded --
	 * it relies on file_exists()/identical_namelist() breaking out. */
	for (i = 0, use_local_copy = FALSE; i >= 0; i++) {
		if (file_exists(buf, NULL)) {
			if (identical_namelist(buf, rfp)) {
				use_local_copy = TRUE;
				break;
			}
			sprintf(buf, "%s@%s_%d", p1,pc->server, i);
		} else
			break;
	}

	if ((p1 = (char *)malloc((size_t)(strlen(buf)+1))) == NULL)
		error(FATAL, "cannot malloc temporary file name buffer\n");

	strcpy(p1, buf);

	if (use_local_copy)
		pc->flags |= NAMELIST_LOCAL;
	else
		pc->flags |= UNLINK_NAMELIST;

	return p1;
}

/*
 * Before copying a kernel across, check whether a kernel of the same
 * name is identical to the remote version: first by size + checksum,
 * then by comparing the embedded "Linux version" strings.
 */
static int
identical_namelist(char *file, struct remote_file *rfp)
{
	char *vers;
	FILE *pipe;
	struct stat sbuf;
	long csum;
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char readbuf[BUFSIZE];

	if (stat(file, &sbuf) < 0)
		return FALSE;

	if (sbuf.st_size != rfp->size)
		return FALSE;

	/* Cheap path: matching checksums settle it. */
	if (remote_file_checksum(rfp) && file_checksum(file, &csum) &&
	    (csum == rfp->csum))
		return TRUE;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	BZERO(readbuf, BUFSIZE);

	sprintf(sendbuf, "LINUX_VERSION %s", rfp->filename);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): always-true strstr(recvbuf, "") -- sentinel string
	 * presumed lost in transcription; as written this returns FALSE
	 * unconditionally.  Confirm against the original source. */
	if (strstr(recvbuf, ""))
		return FALSE;

	vers = recvbuf;

	sprintf(readbuf, "/usr/bin/strings %s | grep 'Linux version'", file);

	if ((pipe = popen(readbuf, "r"))) {
		BZERO(readbuf, BUFSIZE);
		if (fread(readbuf, sizeof(char), BUFSIZE-1, pipe) <= 0) {
			pclose(pipe);
			return FALSE;
		}
		pclose(pipe);
	} else
		return FALSE;

	if (CRASHDEBUG(1)) {
		fprintf(fp, "remote version: [%s]\n", vers);
		fprintf(fp, "local version: [%s]\n", readbuf);
		fprintf(fp, "%s vs. %s => %s\n", file, rfp->filename,
			STREQ(vers, readbuf) ? "IDENTICAL" : "DIFFERENT");
	}

	return (STREQ(vers, readbuf));
}

/*
 * If a remote file exists, get its checksum ("SUM <file>") into
 * rfp->csum and return TRUE.
 */
static int
remote_file_checksum(struct remote_file *rfp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "SUM %s", rfp->filename);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): always-true strstr(recvbuf, "") -- sentinel string
	 * presumed lost in transcription. */
	if (strstr(recvbuf, "")) {
		error(INFO, "%s: does not exist on server %s\n",
			rfp->filename, pc->server);
		return FALSE;
	}

	strtok(recvbuf, " ");		/* SUM */
	p1 = strtok(NULL, " ");		/* filename */
	p1 = strtok(NULL, " ");		/* checksum */
	rfp->csum = htol(p1, FAULT_ON_ERROR, NULL);

	return TRUE;
}

/*
 * Copy a remote file locally, distinguishing it by appending an ampersand
 * and the server name.
 *
 * If the kernel is requested, save the unlinked copy of the remote kernel
 * in a local file, using the same name created by create_local_namelist().
 *
 * If a dumpfile, module, or any other file for that matter, append an
 * ampersand plus the server name.
 *
 * Other files may have their local filename altered if a file of the
 * same name exists with a different checksum.
 */

/*
 * Dispatch a remote-file save request: depending on what rfp->filename
 * names, either save the dumpfile, the kernel namelist, module objects,
 * or a generic file.  Returns TRUE on success.
 */
int
get_remote_file(struct remote_file *rfp)
{
	int i;
	char local[BUFSIZE];
	char readbuf[READBUFSIZE];
	char *p1;
	struct load_module *lm;
	int cnt, sfd, err, retval;
	long csum;

	if (!REMOTE()) {
		error(INFO, "no remote files in use\n");
		return FALSE;
	}

	if (rfp->local)
		goto generic_file_save;

	/* Recognize the dumpfile under any of its aliases. */
	sprintf(readbuf, "%s@%s", pc->server_memsrc, pc->server);
	if (STREQ(rfp->filename, "dumpfile") ||
	    STREQ(rfp->filename, pc->server_memsrc) ||
	    STREQ(rfp->filename, basename(pc->server_memsrc)) ||
	    STREQ(rfp->filename, readbuf))
		goto dumpfile_save;

	/* Recognize the kernel under any of its aliases.
	 * NOTE(review): pc->server_namelist may be NULL here -- confirm
	 * STREQ tolerates that in this codebase. */
	sprintf(readbuf, "%s", pc->namelist);
	if ((p1 = strstr(readbuf, "@")))
		*p1 = NULLCHAR;
	if (STREQ(rfp->filename, "kernel") ||
	    STREQ(rfp->filename, pc->namelist) ||
	    STREQ(rfp->filename, pc->server_namelist) ||
	    STREQ(rfp->filename, readbuf))
		goto kernel_save;

	if (STREQ(rfp->filename, "modules")) {
		for (i = 0; i < kt->mods_installed; i++) {
			lm = &st->load_modules[i];
			if (lm->mod_flags & MOD_REMOTE) {
				fprintf(fp, "%s module saved as: %s\n",
					lm->mod_name, lm->mod_namelist);
				lm->mod_flags &= ~MOD_REMOTE;
			}
		}
		return TRUE;
	}

	if (is_module_name(rfp->filename, NULL, &lm)) {
		if (lm->mod_flags & MOD_REMOTE) {
			fprintf(fp, "%s module saved as: %s\n",
				lm->mod_name, lm->mod_namelist);
			lm->mod_flags &= ~MOD_REMOTE;
		}
		return TRUE;
	}

	/* "foo.o" form: strip the suffix and retry as a module name.
	 * NOTE(review): unlike the branches above, this one does not
	 * clear MOD_REMOTE -- confirm whether that is intentional. */
	strcpy(local, rfp->filename);
	if ((p1 = strstr(local, ".o"))) {
		*p1 = NULLCHAR;
		if (is_module_name(basename(local), NULL, &lm)) {
			if (lm->mod_flags & MOD_REMOTE) {
				fprintf(fp, "%s module saved as: %s\n",
					lm->mod_name, lm->mod_namelist);
				return TRUE;
			}
		}
	}

generic_file_save:

	cnt = 0;
	sprintf(local, "%s@%s", basename(rfp->filename), pc->server);

	/* Reuse an existing local copy if its checksum matches. */
	while (file_exists(local, NULL)) {
		if (CRASHDEBUG(1))
			fprintf(fp, "%s already exists in this directory\n",
				local);
		if (file_checksum(local, &csum) && (csum == rfp->csum)) {
			if (CRASHDEBUG(1))
				error(NOTE,
				    "local %s checksum matches -- using it\n",
					local);
			strcpy(rfp->local, local);
			return TRUE;
		}
		sprintf(local, "%s@%s_%d", basename(rfp->filename), pc->server,
			++cnt);
	}

	if (!remote_file_open(rfp)) {
		error(INFO, "daemon cannot open: %s\n", rfp->filename);
		return FALSE;
	}

	if ((sfd = open(local, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) {
		error(INFO, "open: %s: %s\n", local, strerror(errno));
		remote_file_close(rfp);
		return FALSE;
	}

	if (copy_remote_file(rfp, sfd, local,
	    rfp->flags & REMOTE_VERBOSE ?
	    "please wait... (copying remote file: " : NULL)) {
		if (rfp->flags & REMOTE_VERBOSE)
			fprintf(stderr,
			    "\rremote file saved as: \"%s\"                 \n",
				local);
		retval = TRUE;
		rfp->flags |= REMOTE_COPY_DONE;
	} else {
		fprintf(stderr, "\r%s NOT saved                        \n",
			rfp->filename);
		retval = FALSE;
	}

	close(sfd);
	remote_file_close(rfp);
	if (cnt)
		strcpy(rfp->local, local);

	return retval;

kernel_save:

	if (pc->flags & NAMELIST_SAVED) {
		error(INFO, "\"%s\" is already saved\n", pc->namelist);
		return FALSE;
	}

	if (pc->flags & NAMELIST_LOCAL) {
		error(INFO, "\"%s\" is a local file\n", pc->namelist);
		return FALSE;
	}

	if ((sfd = open(pc->namelist, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) {
		error(INFO, "open: %s: %s\n", pc->namelist, strerror(errno));
		return FALSE;
	}

	/* The unlinked copy is still open on pc->nfd: rewind and dup it. */
	err = 0;
	lseek(sfd, 0, SEEK_SET);
	lseek(pc->nfd, 0, SEEK_SET);

	while ((cnt = read(pc->nfd, readbuf, READBUFSIZE)) > 0) {
		if (write(sfd, readbuf, cnt) != cnt) {
			error(INFO, "write:%s: %s\n", pc->namelist,
				strerror(errno));
			err++;
			break;
		}
	}
	close(sfd);

	if (err) {
		fprintf(fp, "%s NOT saved\n", pc->namelist);
		unlink(pc->namelist);
		retval = FALSE;
	} else {
		fprintf(fp, "kernel saved as: \"%s\"\n", pc->namelist);
		close(pc->nfd);
		pc->nfd = -1;
		pc->flags |= NAMELIST_SAVED;
		retval = TRUE;
	}

	return (retval);

dumpfile_save:

	if (pc->flags & DUMPFILE_SAVED) {
		error(INFO, "\"%s@%s\" is already saved\n",
			basename(pc->server_memsrc), pc->server);
		return FALSE;
	}

	if (pc->flags & MEMSRC_LOCAL) {
		error(INFO, "%s is a local file\n", pc->dumpfile);
		return FALSE;
	}

	if (!(REMOTE_DUMPFILE())) {
		error(INFO, "%s is not a dumpfile\n", pc->server_memsrc);
		return FALSE;
	}

	sprintf(local, "%s@%s", basename(pc->server_memsrc), pc->server);
	if (file_exists(local, NULL)) {
		error(INFO, "%s already exists in this directory\n", local);
		return FALSE;
	}

	rfp->filename = pc->server_memsrc;
	if (!remote_file_open(rfp)) {
		error(INFO, "daemon cannot open: %s\n", pc->server_memsrc);
		return FALSE;
	}

	if ((sfd = open(local, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) {
		error(INFO, "open: %s: %s\n", local, strerror(errno));
		remote_file_close(rfp);
		return FALSE;
	}

	if (copy_remote_file(rfp, sfd, local,
	    "please wait... (copying remote dumpfile: ")) {
		fprintf(stderr, "\rdumpfile saved as: \"%s\"                 \n",
			local);
		pc->flags |= DUMPFILE_SAVED;
		retval = TRUE;
	} else {
		fprintf(stderr, "\r%s NOT saved                        \n",
			pc->server_memsrc);
		retval = FALSE;
	}

	close(sfd);
	remote_file_close(rfp);

	return (retval);
}

/*
 * Query the remote daemon for the kernel name that is running.
 */
static int
remote_find_booted_kernel(struct remote_file *rfp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "FIND_BOOTED_KERNEL");
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	strtok(recvbuf, " ");		/* FIND_BOOTED_KERNEL */
	p1 = strtok(NULL, " ");		/* filename */

	/* NOTE(review): comparison against "" -- failure-sentinel string
	 * presumed lost in transcription; p1 may also be NULL here. */
	if (STREQ(p1, ""))
		return FALSE;

	strcpy(rfp->filename, p1);
	return TRUE;
}

/*
 * Ask the daemon to initialize an LKCD dumpfile, harvesting the
 * panic task and panic message from the reply.
 */
static int
remote_lkcd_dump_init(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1, *p2, *p3;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "LKCD_DUMP_INIT %d %s", pc->rmfd, pc->server_memsrc);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): always-true strstr(recvbuf, "") -- sentinel string
	 * presumed lost in transcription. */
	if (strstr(recvbuf, ""))
		return FALSE;

	p1 = strstr(recvbuf, "panic_task: ");
	p2 = strstr(recvbuf, "panicmsg: ");

	if (p1) {
		p1 += strlen("panic_task: ");
		/* NOTE(review): p3 is NULL if no newline follows -- the
		 * dereference below would then fault. */
		p3 = strstr(p1, "\n");
		*p3 = NULLCHAR;
		tt->panic_task = htol(p1, FAULT_ON_ERROR, NULL);
		if (CRASHDEBUG(1))
			fprintf(fp, "panic_task: %lx\n", tt->panic_task);
	}
	if (p2) {
		p2 += strlen("panicmsg: ");
		if (CRASHDEBUG(1))
			fprintf(fp, "panicmsg: %s", p2);
	}

	set_remote_lkcd_panic_data(tt->panic_task, p2);

	return TRUE;
}

/*
 * Ask the daemon to initialize an S390 dumpfile; same reply format
 * as remote_lkcd_dump_init() but without the panic-data callback.
 */
static int
remote_s390_dump_init(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1, *p2, *p3;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "S390_DUMP_INIT %d %s", pc->rmfd, pc->server_memsrc);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): always-true strstr(recvbuf, "") -- see above. */
	if (strstr(recvbuf, ""))
		return FALSE;

	p1 = strstr(recvbuf, "panic_task: ");
	p2 = strstr(recvbuf, "panicmsg: ");

	if (p1) {
		p1 += strlen("panic_task: ");
		p3 = strstr(p1, "\n");
		*p3 = NULLCHAR;
		tt->panic_task = htol(p1, FAULT_ON_ERROR, NULL);
		if (CRASHDEBUG(1))
			fprintf(fp, "panic_task: %lx\n", tt->panic_task);
	}
	if (p2) {
		p2 += strlen("panicmsg: ");
		if (CRASHDEBUG(1))
			fprintf(fp, "panicmsg: %s", p2);
	}

	return TRUE;
}

/*
 * Ask the daemon to initialize a netdump dumpfile, harvesting the
 * panic task from the reply.
 */
static int
remote_netdump_init(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1, *p2;
	ulong panic_task;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "NETDUMP_INIT %d %s", pc->rmfd, pc->server_memsrc);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): always-true strstr(recvbuf, "") -- see above. */
	if (strstr(recvbuf, ""))
		return FALSE;

	p1 = strstr(recvbuf, "panic_task: ");

	if (p1) {
		p1 += strlen("panic_task: ");
		p2 = strstr(p1, "\n");
		*p2 = NULLCHAR;
		panic_task = htol(p1, FAULT_ON_ERROR, NULL);
		tt->panic_task = panic_task;  /* kludge */
		if (CRASHDEBUG(1))
			fprintf(fp, "panic_task: %lx\n", tt->panic_task);
	}

	return TRUE;
}

/*
 * Query the daemon for the remote page size, also capturing the remote
 * type string and cpu count into the rc context as side effects.
 */
uint
remote_page_size(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1, *p2, *p3;
	uint psz;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);

	if (REMOTE_ACTIVE())
		sprintf(sendbuf, "PAGESIZE LIVE");
	else if (REMOTE_PAUSED())
		sprintf(sendbuf, "PAGESIZE NIL");
	else if (pc->flags & REM_NETDUMP)
		sprintf(sendbuf, "PAGESIZE NETDUMP");
	else if (pc->flags & REM_MCLXCD)
		sprintf(sendbuf, "PAGESIZE MCLXCD");
	else if (pc->flags & REM_LKCD)
		sprintf(sendbuf, "PAGESIZE LKCD");
	else if (pc->flags & REM_S390D)
		sprintf(sendbuf, "PAGESIZE S390D");
	else
		error(FATAL,
		    "cannot determine remote page size (unknown memory source)\n");

	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	if (strstr(recvbuf, "FAIL"))
		error(FATAL, "cannot determine remote page size\n");

	strtok(recvbuf, " ");		/* PAGESIZE */
	p1 = strtok(NULL, " ");		/* LIVE, MCLXCD or LKCD */
	p1 = strtok(NULL, " ");		/* page size */
	p2 = strtok(NULL, " ");		/* remote type */
	p3 = strtok(NULL, " ");		/* number of Cpus */

	psz = atoi(p1);
	if (psz > MAXRECVBUFSIZE)
		error(FATAL,
		    "remote page size %d is larger than MAXRECVBUFSIZE!\n",
			psz);
	if (p2) {
		strncpy(rc->remote_type, p2, sizeof(rc->remote_type) - 1);
		rc->remote_type[sizeof(rc->remote_type) - 1] = 0;
	}
	if (p3)
		rc->n_cpus = atoi(p3);

	return psz;
}

/*
 * Copy a remote file to a local file, closing the passed-in fd when done.
 * A running tally of percentage-done numbers can optionally be displayed.
*/ static int copy_remote_file(struct remote_file *rfp, int fd, char *file, char *ttystr) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char readbuf[READBUFSIZE]; char *bufptr; long pct, last; ulong size, offset, filesize; ulong ret, req, tot; int sysret ATTRIBUTE_UNUSED; ssize_t bytes ATTRIBUTE_UNUSED; last = -1; lseek(fd, 0, SEEK_SET); filesize = rfp->size; for (offset = 0; offset < filesize; ) { size = MIN(filesize-offset, pc->rcvbufsize); BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "READ %d %lx %ld", rfp->fd, offset, size); bytes = write(pc->sockfd, sendbuf, strlen(sendbuf) + 1); bzero(readbuf, READBUFSIZE); req = size; tot = 0; sprintf(recvbuf, "%s:FAIL", sendbuf); bufptr = readbuf; while (req) { ret = recv(pc->sockfd, bufptr, req, 0); if (!tot && STRNEQ(bufptr, recvbuf)) { tot = -1; break; } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (write(fd, readbuf, size) != size) { error(INFO, "%swrite to local file \"%s\" failed", ttystr ? "\n" : "", file); close(fd); return FALSE; } offset += tot; if (ttystr) { pct = (offset*100)/filesize; if (pct > last) { /* readline work-around... */ if (last < 0) sprintf(readbuf, "echo -n \'%s0%%)\'", ttystr); else if (last >= 0 && last < 10) sprintf(readbuf, "echo -e -n \"\\b\\b\\b%ld%%)\"", pct); else if (last < 100) sprintf(readbuf, "echo -e -n \"\\b\\b\\b\\b%ld%%)\"", pct); sysret = system(readbuf); last = pct; } } } if (offset != filesize) { error(INFO, "%swrite to local file \"%s\" failed", ttystr ? "\n" : "", file); close(fd); return FALSE; } fsync(fd); return TRUE; } /* * Copy a remote file to a local file, closing the passed-in fd when done. * A running tally of percentage-done numbers can optionally be displayed. 
*/ static void copy_remote_gzip_file(struct remote_file *rfp, char *file, char *ttystr) { int done; char sendbuf[BUFSIZE]; char readbuf[READBUFSIZE]; char gziphdr[DATA_HDRSIZE]; char *bufptr, *p1; FILE *pipe; size_t gtot; struct stat sbuf; ulong pct, ret, req, tot, total; sprintf(readbuf, "/usr/bin/gunzip > %s", pc->namelist); if ((pipe = popen(readbuf, "w")) == NULL) error(FATAL, "cannot open pipe to create %s\n", pc->namelist); BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "READ_GZIP %ld %s", pc->rcvbufsize, rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); bzero(readbuf, READBUFSIZE); done = total = 0; gtot = 0; while (!done) { req = pc->rcvbufsize; bufptr = readbuf; tot = 0; while (req) { ret = (ulong)recv(pc->sockfd, bufptr, req, 0); if (!tot) { if (STRNEQ(bufptr, FAILMSG)) { fprintf(fp, "copy_remote_gzip_file: %s\n", bufptr); tot = -1; break; } if (STRNEQ(bufptr, DONEMSG) || STRNEQ(bufptr, DATAMSG)) { strncpy(gziphdr, bufptr, DATA_HDRSIZE); if (CRASHDEBUG(1)) fprintf(fp, "copy_remote_gzip_file: [%s]\n", gziphdr); p1 = strtok(gziphdr, " "); /* DONE */ if (STREQ(p1, "DONE")) done = TRUE; p1 = strtok(NULL, " "); /* count */ gtot = atol(p1); total += gtot; } } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), gtot, pipe) != gtot) error(FATAL, "fwrite to %s failed\n", pc->namelist); if (ttystr && (stat(pc->namelist, &sbuf) == 0)) { pct = (sbuf.st_size * 100)/rfp->size; fprintf(stderr, "\r%s%ld%%)%s", ttystr, pct, CRASHDEBUG(1) ? "\n" : ""); } } if (CRASHDEBUG(1)) fprintf(fp, "copy_remote_gzip_file: GZIP total: %ld\n", total); pclose(pipe); } /* * Set up to have get_remote_file() copy the remote module locally. * If it's already here, no copy is done. 
*/ int find_remote_module_objfile(struct load_module *lm, char *module, char *retbuf) { int absolute; char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char local[BUFSIZE]; char found[BUFSIZE]; char *p1; long csum; struct remote_file remote_file, *rfp; rfp = &remote_file; BZERO(rfp, sizeof(struct remote_file)); absolute = (*module == '/'); if (absolute) { if ((p1 = strstr(module, "@"))) { *p1 = NULLCHAR; } else { error(FATAL, "module file name must have \"@server-name\" attached\n"); } sprintf(local, "%s@%s", basename(module), pc->server); rfp->filename = module; rfp->local = local; if (!remote_file_checksum(rfp)) { error(INFO, "%s: does not exist on server %s\n", module, pc->server); return FALSE; } } else { if ((p1 = strstr(module, "@"))) *p1 = NULLCHAR; sprintf(local, "%s@%s", module, pc->server); BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "FIND_MODULE %s %s", kt->utsname.release, module); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) { fprintf(fp, "find_remote_module_objfile: [%s]\n", recvbuf); return FALSE; } strtok(recvbuf, " "); /* FIND_MODULE */ p1 = strtok(NULL, " "); /* release */ p1 = strtok(NULL, " "); /* module */ strcpy(found, strtok(NULL, " ")); /* resultant path */ p1 = strtok(NULL, " "); /* checksum */ csum = htol(p1, FAULT_ON_ERROR, NULL); rfp->filename = found; rfp->local = local; rfp->csum = csum; } if (get_remote_file(rfp)) { if (!is_elf_file(rfp->local)) { error(INFO, "%s@%s: not an ELF format object file\n", rfp->filename, pc->server); return FALSE; } strcpy(retbuf, rfp->local); if (rfp->flags & REMOTE_COPY_DONE) { lm->mod_flags |= MOD_REMOTE; pc->flags |= UNLINK_MODULES; } return TRUE; } return FALSE; } /* * Tell the daemon to free the current dumpfile memory. 
 */
int
remote_free_memory(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *type, *p1;

	/* Map the session's dumpfile type to its protocol keyword. */
	if (pc->flags & REM_NETDUMP)
		type = "NETDUMP";
	else if (pc->flags & REM_MCLXCD)
		type = "MCLXCD";
	else if (pc->flags & REM_LKCD)
		type = "LKCD";
	else if (pc->flags & REM_S390D)
		type = "S390D";
	else
		return 0;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "MEMORY FREE %s", type);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* Reply format: MEMORY FREE <type> <pages> */
	p1 = strtok(recvbuf, " ");  /* MEMORY */
	p1 = strtok(NULL, " ");     /* FREE */
	p1 = strtok(NULL, " ");     /* MCLXCD, LKCD etc. */
	p1 = strtok(NULL, " ");     /* pages */

	/*
	 * NOTE(review): the comparison string here looks garbled -- an
	 * angle-bracketed token was likely stripped from the source; also,
	 * strtok() may have returned NULL by this point, which STREQ()
	 * would dereference.  Verify against the daemon reply format.
	 */
	if (STREQ(p1, ""))
		return 0;

	return(atol(p1));
}

/*
 * Return the number of dumpfile pages used by the daemon.
 */
int
remote_memory_used(void)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *type, *p1;

	/* Map the session's dumpfile type to its protocol keyword. */
	if (pc->flags & REM_NETDUMP)
		type = "NETDUMP";
	else if (pc->flags & REM_MCLXCD)
		type = "MCLXCD";
	else if (pc->flags & REM_LKCD)
		type = "LKCD";
	else if (pc->flags & REM_S390D)
		type = "S390D";
	else
		return 0;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "MEMORY USED %s", type);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* Reply format: MEMORY USED <type> <pages> */
	p1 = strtok(recvbuf, " ");  /* MEMORY */
	p1 = strtok(NULL, " ");     /* USED */
	p1 = strtok(NULL, " ");     /* MCLXCD, LKCD, etc. */
	p1 = strtok(NULL, " ");     /* pages */

	/* NOTE(review): same garbled/NULL-risk concern as above. */
	if (STREQ(p1, ""))
		return 0;

	return(atol(p1));
}

/*
 * Have the daemon return the output of vas_memory_dump(), lkcd_memory_dump().
 * or dump_lkcd_environment()
 */
int
remote_memory_dump(int verbose)
{
	char sendbuf[BUFSIZE];
	char readbuf[READBUFSIZE];
	char datahdr[DATA_HDRSIZE];
	char *type, *bufptr, *p1;
	ulong done, total;
	ulong ret, req, tot;
	size_t dtot;

	/* Map the session's dumpfile type to its protocol keyword. */
	if (pc->flags & REM_NETDUMP)
		type = "NETDUMP";
	else if (pc->flags & REM_MCLXCD)
		type = "MCLXCD";
	else if (pc->flags & REM_LKCD)
		type = "LKCD";
	else if (pc->flags & REM_S390D)
		type = "S390D";
	else
		return 0;

	BZERO(sendbuf, BUFSIZE);
	sprintf(sendbuf, "MEMORY_DUMP %ld %s%s",
		pc->rcvbufsize, type, verbose ? "_VERBOSE" : "");
	remote_tcp_write_string(pc->sockfd, sendbuf);

	bzero(readbuf, READBUFSIZE);
	done = total = 0;
	dtot = 0;

	/*
	 * Each server buffer leads off with a DATA/DONE/FAIL header in
	 * its first DATA_HDRSIZE bytes, followed by the dump output.
	 */
	while (!done) {
		req = pc->rcvbufsize;
		bufptr = readbuf;
		tot = 0;

		while (req) {
			/*
			 * NOTE(review): the recv() return is not checked;
			 * 0 (peer closed) or -1 (error) would corrupt the
			 * unsigned req/tot counters and can loop forever.
			 */
			ret = recv(pc->sockfd, bufptr, req, 0);

			if (!tot) {
				if (STRNEQ(bufptr, FAILMSG)) {
					fprintf(fp, "remote_memory_dump: %s\n",
						bufptr);
					tot = -1;
					break;
				}
				if (STRNEQ(bufptr, DONEMSG) ||
				    STRNEQ(bufptr, DATAMSG)) {
					strncpy(datahdr, bufptr, DATA_HDRSIZE);
					if (CRASHDEBUG(1))
						fprintf(fp,
					    "remote_memory_dump: [%s]\n",
							datahdr);
					p1 = strtok(datahdr, " "); /* DONE */
					if (STREQ(p1, "DONE"))
						done = TRUE;
					p1 = strtok(NULL, " ");    /* count */
					dtot = atol(p1);
					total += dtot;
				}
			}

			req -= ret;
			tot += ret;
			bufptr += ret;
		}

		if (tot == -1)
			break;

		/*
		 * NOTE(review): output goes to fp, not pc->namelist --
		 * the error message text looks stale (copied from the
		 * gzip-copy routine).
		 */
		if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), dtot, fp)
		    != dtot)
			error(FATAL, "fwrite to %s failed\n", pc->namelist);
	}

	return 1;
}

/*
 * Read memory from the remote memory source. The remote file descriptor
 * is abstracted to allow for a common /dev/mem-/dev/kmem call. Since
 * this is only called from read_daemon(), the request can never exceed
 * a page in length.
 */
int
remote_memory_read(int rfd, char *buffer, int cnt, physaddr_t address, int vcpu)
{
	char sendbuf[BUFSIZE];
	char datahdr[DATA_HDRSIZE];
	char *p1;
	int ret, tot;
	ulong addr;

	addr = (ulong)address;  /* may be virtual */
	BZERO(sendbuf, BUFSIZE);

	/* Build the type-specific read request for the daemon. */
	if (pc->flags & REM_NETDUMP) {
		sprintf(sendbuf, "READ_NETDUMP %lx %d", addr, cnt);
	} else if (pc->flags & REM_MCLXCD)
		sprintf(sendbuf, "READ_MCLXCD %lx %d", addr, cnt);
	else if (pc->flags & REM_LKCD)
		sprintf(sendbuf, "READ_LKCD %d %lx %d", rfd, addr, cnt);
	else if (pc->flags & REM_S390D)
		sprintf(sendbuf, "READ_S390D %d %lx %d", rfd, addr, cnt);
	else if (vcpu >= 0)
		sprintf(sendbuf, "READ_LIVE %d %lx %d %d",
			rfd, addr, cnt, vcpu);
	else
		sprintf(sendbuf, "READ_LIVE %d %lx %d", rfd, addr, cnt);

	if (remote_tcp_write_string(pc->sockfd, sendbuf))
		return -1;

	/*
	 * Read request will come back with a singular header
	 * followed by the data.
	 */
	BZERO(datahdr, DATA_HDRSIZE);
	ret = remote_tcp_read_string(pc->sockfd, datahdr, DATA_HDRSIZE, 1);
	if (ret <= 0)
		return -1;

	if (CRASHDEBUG(3))
		fprintf(fp, "remote_memory_read: [%s]\n", datahdr);

	/* Failure reply carries the daemon-side errno: "FAIL <errno>" */
	if (STRNEQ(datahdr, FAILMSG)) {
		p1 = strtok(datahdr, " ");  /* FAIL */
		p1 = strtok(NULL, " ");     /* errno */
		errno = atoi(p1);
		return -1;
	}

	if (!STRNEQ(datahdr, DONEMSG) && !STRNEQ(datahdr, DATAMSG)) {
		error(INFO, "out of sync with remote memory source\n");
		return -1;
	}

	p1 = strtok(datahdr, " ");  /* DONE */
	p1 = strtok(NULL, " ");     /* count */
	tot = atol(p1);
	/*
	 * error(FATAL, ...) normally does not return to the caller;
	 * the return -1 below acts as a safety net if it does.
	 */
	if (cnt != tot) {
		error(FATAL,
		    "requested %d bytes remote memory return %d bytes\n",
			cnt, tot);
		return -1;
	}

	ret = remote_tcp_read(pc->sockfd, buffer, tot);
	if (ret != tot) {
		error(FATAL,
		    "requested %d bytes remote memory return %d bytes\n",
			ret, tot);
		return -1;
	}

	return tot;
}

/*
 * If a command was interrupted locally, there may be leftover data waiting
 * to be read.
 */
void
remote_clear_pipeline(void)
{
	int ret;
	fd_set rfds;
	char recvbuf[READBUFSIZE];
	struct timeval tv;

	/* Zero timeout: poll the socket without blocking. */
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	FD_ZERO(&rfds);
	FD_SET(pc->sockfd, &rfds);

	ret = select(pc->sockfd+1, &rfds, NULL, NULL, &tv);

	/* Drain and discard at most one buffer of stale data. */
	if (FD_ISSET(pc->sockfd, &rfds)) {
		ret = recv(pc->sockfd, recvbuf, pc->rcvbufsize, 0);
		if (CRASHDEBUG(1))
			error(INFO,
			    "remote_clear_pipeline(%d): %d bytes discarded\n",
				pc->sockfd, ret);
	}
}

/*
 * Attempt to run the user-entered command on the remote system.
 */
int
remote_execute(void)
{
	char command[BUFSIZE];
	char sendbuf[BUFSIZE];
	char readbuf[READBUFSIZE];
	char datahdr[DATA_HDRSIZE];
	char *bufptr, *p1;
	ulong done, total;
	ulong ret, req, tot;
	size_t dtot;

	/* Remote commands are entered as "@command ..." */
	if (!STRNEQ(args[0], "@") || strlen(args[0]) == 1)
		return FALSE;

	/* Strip the leading '@' and any surrounding quotes. */
	shift_string_left(concat_args(command, 0, FALSE), 1);
	if (QUOTED_STRING(command))
		strip_ending_char(strip_beginning_char(command, '"'), '"');
	if (CRASHDEBUG(1))
		error(INFO, "remote command: %s\n", command);

	BZERO(sendbuf, BUFSIZE);
	sprintf(sendbuf, "EXECUTE %ld %s", pc->rcvbufsize, command);
	remote_tcp_write_string(pc->sockfd, sendbuf);

	bzero(readbuf, READBUFSIZE);
	done = total = 0;
	dtot = 0;

	/*
	 * Each server buffer leads off with a DATA/DONE/FAIL header in
	 * its first DATA_HDRSIZE bytes, followed by command output.
	 */
	while (!done) {
		req = pc->rcvbufsize;
		bufptr = readbuf;
		tot = 0;

		while (req) {
			/*
			 * NOTE(review): the recv() return is not checked;
			 * 0 (peer closed) or -1 (error) would corrupt the
			 * unsigned req/tot counters and can loop forever.
			 */
			ret = recv(pc->sockfd, bufptr, req, 0);

			if (!tot) {
				if (STRNEQ(bufptr, FAILMSG)) {
					fprintf(fp, "remote_execute: %s\n",
						bufptr);
					tot = -1;
					break;
				}
				if (STRNEQ(bufptr, DONEMSG) ||
				    STRNEQ(bufptr, DATAMSG)) {
					strncpy(datahdr, bufptr, DATA_HDRSIZE);
					if (CRASHDEBUG(1))
						fprintf(fp,
						    "remote_execute: [%s]\n",
							datahdr);
					p1 = strtok(datahdr, " "); /* DONE */
					if (STREQ(p1, "DONE"))
						done = TRUE;
					p1 = strtok(NULL, " ");    /* count */
					dtot = atol(p1);
					total += dtot;
				}
			}

			req -= ret;
			tot += ret;
			bufptr += ret;
		}

		if (tot == -1)
			break;

		if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), dtot, fp)
		    != dtot)
			error(FATAL, "fwrite failed\n");
	}

	return TRUE;
}

/*
 * Clean up on exit.
 */
void
remote_exit(void)
{
	char buf[BUFSIZE];

	/* Remove any files that were temporarily copied from the server. */
	if (pc->flags & UNLINK_NAMELIST)
		unlink(pc->namelist);
	if (pc->flags & UNLINK_MODULES)
		unlink_module(NULL);

	/* Tell the daemon this session is over. */
	BZERO(buf, BUFSIZE);
	sprintf(buf, "EXIT");
	remote_tcp_write_string(pc->sockfd, buf);

	/*
	 * Read but ignore the return status -- we don't really care...
	 */
	remote_tcp_read_string(pc->sockfd, buf, BUFSIZE-1, NIL_MODE());
}
#endif /* !DAEMON */
crash-7.1.4/lkcd_vmdump_v1.h0000775000000000000000000001202112634305150014405 0ustar rootroot/* lkcd_vmdump_v1.h - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Kernel header file for Linux crash dumps.
 *
 * Created by: Matt Robinson (yakker@sgi.com)
 *
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 *
 */

/* This header file includes all structure definitions for crash dumps.
 */
#ifndef _VMDUMP_H
#define _VMDUMP_H

/* necessary header files */
#ifndef MCLX
/*
 * NOTE(review): the header names for these two #include directives
 * (angle-bracketed, per the trailing comments: the utsname and ptrace
 * headers) were stripped from this copy of the source -- restore them
 * from the original lkcd_vmdump_v1.h.
 */
#include  /* for utsname structure */
#endif
#ifndef IA64
typedef unsigned int u32;
#include  /* for pt_regs */
#endif

/* necessary header definitions in all cases */
#define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */

#ifdef CONFIG_VMDUMP
/* size of a dump header page */
#define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */

/* standard header definitions */
#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
#define DUMP_VERSION_NUMBER 0x1 /* dump version number */
#define DUMP_PANIC_LEN 0x100 /* dump panic string length */

/* dump flags -- add as necessary */
#define DUMP_RAW 0x1 /* raw page (no compression) */
#define DUMP_COMPRESSED 0x2 /* page is compressed */
#define DUMP_END 0x4 /* end marker on a full dump */

/* dump types - type specific stuff added later for page typing */
#define DUMP_NONE 0 /* no dumping at all -- just bail */
#define DUMP_HEADER 1 /* kernel dump header only */
#define DUMP_KERN 2 /* dump header and kernel pages */
#define DUMP_USED 3 /* dump header, kernel/user pages */
#define DUMP_ALL 4 /* dump header, all memory pages */

/*
 * Structure: dump_header_t
 * Function: This is the header dumped at the top of every valid crash
 * dump.
 * easy reassembly of each crash dump page. The address bits
 * are split to make things easier for 64-bit/32-bit system
 * conversions.
 */
typedef struct _dump_header_s {
	/* the dump magic number -- unique to verify dump is valid */
	uint64_t dh_magic_number;
	/* the version number of this dump */
	uint32_t dh_version;
	/* the size of this header (in case we can't read it) */
	uint32_t dh_header_size;
	/* the level of this dump (just a header?) */
	uint32_t dh_dump_level;
	/* the size of a Linux memory page (4K, 8K, 16K, etc.) */
	uint32_t dh_page_size;
	/* the size of all physical memory */
	uint64_t dh_memory_size;
	/* the start of physical memory */
	uint64_t dh_memory_start;
	/* the end of physical memory */
	uint64_t dh_memory_end;
	/* the esp for i386 systems -- MOVE LATER */
	uint32_t dh_esp;
	/* the eip for i386 systems -- MOVE LATER */
	uint32_t dh_eip;
	/* the number of pages in this dump specifically */
	uint32_t dh_num_pages;
	/* the panic string, if available */
	char dh_panic_string[DUMP_PANIC_LEN];
	/* the time of the system crash */
	struct timeval dh_time;
	/* the utsname (uname) information */
	struct new_utsname dh_utsname;
	/* the dump registers */
#ifndef IA64
#ifndef S390
#ifndef S390X
#ifndef ARM64
	struct pt_regs dh_regs;
#endif
#endif
#endif
#endif
	/* the address of the current task */
	struct task_struct *dh_current_task;
} dump_header_t;

/*
 * Structure: dump_page_t
 * Function: To act as the header associated to each physical page of
 * memory saved in the system crash dump. This allows for
 * easy reassembly of each crash dump page. The address bits
 * are split to make things easier for 64-bit/32-bit system
 * conversions.
 */
typedef struct _dump_page_s {
	/* the address of this dump page */
	uint64_t dp_address;
	/* the size of this dump page */
	uint32_t dp_size;
	/* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
	uint32_t dp_flags;
} dump_page_t;
#endif /* CONFIG_VMDUMP */

#ifdef __KERNEL__
extern void dump_init(uint64_t, uint64_t);
extern void dump_open(char *);
extern void dump_execute(char *, struct pt_regs *);
#endif

#endif /* _VMDUMP_H */
crash-7.1.4/arm64.c0000664000000000000000000016476412634305150012431 0ustar rootroot/*
 * arm64.c - core analysis suite
 *
 * Copyright (C) 2012-2015 David Anderson
 * Copyright (C) 2012-2015 Red Hat, Inc. All rights reserved.
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef ARM64 #include "defs.h" #include #include #define NOT_IMPLEMENTED(X) error((X), "%s: function not implemented\n", __func__) static struct machine_specific arm64_machine_specific = { 0 }; static int arm64_verify_symbol(const char *, ulong, char); static void arm64_parse_cmdline_args(void); static void arm64_calc_phys_offset(void); static void arm64_calc_virtual_memory_ranges(void); static int arm64_kdump_phys_base(ulong *); static ulong arm64_processor_speed(void); static void arm64_init_kernel_pgd(void); static int arm64_kvtop(struct task_context *, ulong, physaddr_t *, int); static int arm64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int arm64_vtop_2level_64k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_3level_4k(ulong, ulong, physaddr_t *, int); static ulong arm64_get_task_pgd(ulong); static void arm64_stackframe_init(void); static int arm64_eframe_search(struct bt_info *); static int arm64_is_kernel_exception_frame(struct bt_info *, ulong); static int arm64_in_exception_text(ulong); static void arm64_back_trace_cmd(struct bt_info *); static void arm64_print_text_symbols(struct bt_info *, struct arm64_stackframe *, FILE *); static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, FILE *); static void arm64_display_full_frame(struct bt_info *, ulong); static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *); static int arm64_get_dumpfile_stackframe(struct bt_info *, struct arm64_stackframe 
*); static int arm64_in_kdump_text(struct bt_info *, struct arm64_stackframe *); static int arm64_get_stackframe(struct bt_info *, struct arm64_stackframe *); static void arm64_get_stack_frame(struct bt_info *, ulong *, ulong *); static void arm64_print_exception_frame(struct bt_info *, ulong, int, FILE *ofp); static void arm64_do_bt_reference_check(struct bt_info *, ulong, char *); static int arm64_translate_pte(ulong, void *, ulonglong); static ulong arm64_vmalloc_start(void); static int arm64_is_task_addr(ulong); static int arm64_dis_filter(ulong, char *, unsigned int); static void arm64_cmd_mach(void); static void arm64_display_machine_stats(void); static int arm64_get_smp_cpus(void); static void arm64_clear_machdep_cache(void); static int arm64_in_alternate_stack(int, ulong); static int arm64_get_kvaddr_ranges(struct vaddr_range *); static int arm64_get_crash_notes(void); static void arm64_calc_VA_BITS(void); static int arm64_is_uvaddr(ulong, struct task_context *); /* * Do all necessary machine-specific setup here. This is called several times * during initialization. */ void arm64_init(int when) { ulong value; struct machine_specific *ms; #if defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the ARM64 architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->machspec = &arm64_machine_specific; machdep->verify_symbol = arm64_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->verify_paddr = generic_verify_paddr; if (machdep->cmdline_args[0]) arm64_parse_cmdline_args(); machdep->flags |= MACHDEP_BT_TEXT; break; case PRE_GDB: if (!machdep->pagesize) { /* * Kerneldoc Documentation/arm64/booting.txt describes * the kernel image header flags field. 
*/ value = machdep->machspec->kernel_flags; value = (value >> 1) & 3; switch(value) { case 0: break; case 1: machdep->pagesize = 4096; break; case 2: /* TODO: machdep->pagesize = 16384; */ error(FATAL, "16K pages not supported."); break; case 3: machdep->pagesize = 65536; break; } } if (!machdep->pagesize && kernel_symbol_exists("swapper_pg_dir") && kernel_symbol_exists("idmap_pg_dir")) { value = symbol_value("swapper_pg_dir") - symbol_value("idmap_pg_dir"); /* * idmap_pg_dir is 2 pages prior to 4.1, * and 3 pages thereafter. Only 4K and 64K * page sizes are supported. */ switch (value) { case (4096 * 2): case (4096 * 3): machdep->pagesize = 4096; break; case (65536 * 2): case (65536 * 3): machdep->pagesize = 65536; break; } } else if (ACTIVE()) machdep->pagesize = memory_page_size(); /* host */ machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); arm64_calc_VA_BITS(); machdep->machspec->page_offset = ARM64_PAGE_OFFSET; machdep->identity_map_base = ARM64_PAGE_OFFSET; machdep->machspec->userspace_top = ARM64_USERSPACE_TOP; machdep->machspec->modules_vaddr = ARM64_MODULES_VADDR; machdep->machspec->modules_end = ARM64_MODULES_END; machdep->machspec->vmalloc_start_addr = ARM64_VMALLOC_START; machdep->machspec->vmalloc_end = ARM64_VMALLOC_END; machdep->kvbase = ARM64_VMALLOC_START; machdep->machspec->vmemmap_vaddr = ARM64_VMEMMAP_VADDR; machdep->machspec->vmemmap_end = ARM64_VMEMMAP_END; switch (machdep->pagesize) { case 4096: machdep->flags |= VM_L3_4K; machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_4K; if ((machdep->pgd = (char *)malloc(PTRS_PER_PGD_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PTRS_PER_PMD_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->pud = NULL; /* not used */ break; case 
65536: machdep->flags |= VM_L2_64K; machdep->ptrs_per_pgd = PTRS_PER_PGD_L2_64K; if ((machdep->pgd = (char *)malloc(PTRS_PER_PGD_L2_64K * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L2_64K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->pmd = NULL; /* not used */ machdep->pud = NULL; /* not used */ break; default: if (machdep->pagesize) error(FATAL, "invalid/unsupported page size: %d\n", machdep->pagesize); else error(FATAL, "cannot determine page size\n"); } machdep->last_pud_read = 0; /* not used */ machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->clear_machdep_cache = arm64_clear_machdep_cache; machdep->stacksize = ARM64_STACK_SIZE; machdep->flags |= VMEMMAP; arm64_calc_phys_offset(); machdep->uvtop = arm64_uvtop; machdep->kvtop = arm64_kvtop; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = arm64_is_uvaddr; machdep->eframe_search = arm64_eframe_search; machdep->back_trace = arm64_back_trace_cmd; machdep->in_alternate_stack = arm64_in_alternate_stack; machdep->processor_speed = arm64_processor_speed; machdep->get_task_pgd = arm64_get_task_pgd; machdep->get_stack_frame = arm64_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = arm64_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = arm64_vmalloc_start; machdep->get_kvaddr_ranges = arm64_get_kvaddr_ranges; machdep->is_task_addr = arm64_is_task_addr; machdep->dis_filter = arm64_dis_filter; machdep->cmd_mach = arm64_cmd_mach; machdep->get_smp_cpus = arm64_get_smp_cpus; machdep->line_number_hooks = NULL; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->dumpfile_init = NULL; machdep->verify_line_number = 
NULL; machdep->init_kernel_pgd = arm64_init_kernel_pgd; break; case POST_GDB: arm64_calc_virtual_memory_ranges(); machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ms = machdep->machspec; if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 2; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 50; ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ms->PTE_PROT_NONE = (1UL << 58); ms->PTE_FILE = 0; /* unused */ } else if (THIS_KERNEL_VERSION >= LINUX(3,13,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 3; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 49; ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ms->PTE_PROT_NONE = (1UL << 58); ms->PTE_FILE = (1UL << 2); } else if (THIS_KERNEL_VERSION >= LINUX(3,11,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 4; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 0; /* unused */ ms->__SWP_OFFSET_MASK = 0; /* unused */ ms->PTE_PROT_NONE = (1UL << 2); ms->PTE_FILE = (1UL << 3); } else { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 3; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 0; /* unused */ ms->__SWP_OFFSET_MASK = 0; /* unused */ ms->PTE_PROT_NONE = (1UL << 1); ms->PTE_FILE = (1UL << 2); } if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); if (!machdep->hz) machdep->hz = 100; arm64_stackframe_init(); break; case POST_VM: /* * 
crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. */ if (!LIVE() && !arm64_get_crash_notes()) error(WARNING, "cannot retrieve registers for active task%s\n\n", kt->cpus > 1 ? "s" : ""); break; case LOG_ONLY: machdep->machspec = &arm64_machine_specific; arm64_calc_VA_BITS(); arm64_calc_phys_offset(); machdep->machspec->page_offset = ARM64_PAGE_OFFSET; break; } } /* * Accept or reject a symbol from the kernel namelist. */ static int arm64_verify_symbol(const char *name, ulong value, char type) { if (!name || !strlen(name)) return FALSE; if ((type == 'A') && STREQ(name, "_kernel_flags_le")) machdep->machspec->kernel_flags = le64toh(value); if (((type == 'A') || (type == 'a')) && (highest_bit_long(value) != 63)) return FALSE; if ((value == 0) && ((type == 'a') || (type == 'n') || (type == 'N') || (type == 'U'))) return FALSE; if (STREQ(name, "$d") || STREQ(name, "$x")) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (!(machdep->flags & KSYMS_START) && STREQ(name, "idmap_pg_dir")) machdep->flags |= KSYMS_START; return TRUE; } void arm64_dump_machdep_table(ulong arg) { const struct machine_specific *ms; int others, i; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PHYS_OFFSET) fprintf(fp, "%sPHYS_OFFSET", others++ ? "|" : ""); if (machdep->flags & VM_L2_64K) fprintf(fp, "%sVM_L2_64K", others++ ? "|" : ""); if (machdep->flags & VM_L3_4K) fprintf(fp, "%sVM_L3_4K", others++ ? "|" : ""); if (machdep->flags & VMEMMAP) fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); if (machdep->flags & KDUMP_ENABLED) fprintf(fp, "%sKDUMP_ENABLED", others++ ? "|" : ""); if (machdep->flags & MACHDEP_BT_TEXT) fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: arm64_eframe_search()\n"); fprintf(fp, " back_trace: arm64_back_trace_cmd()\n"); fprintf(fp, " in_alternate_stack: arm64_in_alternate_stack()\n"); fprintf(fp, " processor_speed: arm64_processor_speed()\n"); fprintf(fp, " uvtop: arm64_uvtop()->%s()\n", machdep->flags & VM_L3_4K ? "arm64_vtop_3level_4k" : "arm64_vtop_2level_64k"); fprintf(fp, " kvtop: arm64_kvtop()->%s()\n", machdep->flags & VM_L3_4K ? 
"arm64_vtop_3level_4k" : "arm64_vtop_2level_64k"); fprintf(fp, " get_task_pgd: arm64_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: arm64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: arm64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: arm64_vmalloc_start()\n"); fprintf(fp, " get_kvaddr_ranges: arm64_get_kvaddr_ranges()\n"); fprintf(fp, " is_task_addr: arm64_is_task_addr()\n"); fprintf(fp, " verify_symbol: arm64_verify_symbol()\n"); fprintf(fp, " dis_filter: arm64_dis_filter()\n"); fprintf(fp, " cmd_mach: arm64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: arm64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: arm64_is_uvaddr()\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " init_kernel_pgd: arm64_init_kernel_pgd\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " dumpfile_init: (not used)\n"); fprintf(fp, " process_elf_notes: process_elf64_notes()\n"); fprintf(fp, " verify_line_number: (not used)\n"); fprintf(fp, " xendump_p2m_create: (n/a)\n"); fprintf(fp, "xen_kdump_p2m_create: (n/a)\n"); fprintf(fp, " xendump_panic_task: (n/a)\n"); fprintf(fp, " get_xendump_regs: (n/a)\n"); fprintf(fp, " line_number_hooks: (not used)\n"); fprintf(fp, " last_pud_read: (not used)\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: "); if (PAGESIZE() == 65536) fprintf(fp, "(not used)\n"); else fprintf(fp, "%lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " clear_machdep_cache: arm64_clear_machdep_cache()\n"); fprintf(fp, " 
pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? machdep->cmdline_args[i] : "(unused)"); } ms = machdep->machspec; fprintf(fp, " machspec: %lx\n", (ulong)ms); fprintf(fp, " VA_BITS: %ld\n", ms->VA_BITS); fprintf(fp, " userspace_top: %016lx\n", ms->userspace_top); fprintf(fp, " page_offset: %016lx\n", ms->page_offset); fprintf(fp, " vmalloc_start_addr: %016lx\n", ms->vmalloc_start_addr); fprintf(fp, " vmalloc_end: %016lx\n", ms->vmalloc_end); fprintf(fp, " modules_vaddr: %016lx\n", ms->modules_vaddr); fprintf(fp, " modules_end: %016lx\n", ms->modules_end); fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr); fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end); fprintf(fp, " phys_offset: %lx\n", ms->phys_offset); fprintf(fp, "__exception_text_start: %lx\n", ms->__exception_text_start); fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end); fprintf(fp, " panic_task_regs: %lx\n", (ulong)ms->panic_task_regs); fprintf(fp, " PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE); fprintf(fp, " PTE_FILE: "); if (ms->PTE_FILE) fprintf(fp, "%lx\n", ms->PTE_FILE); else fprintf(fp, "(unused)\n"); fprintf(fp, " __SWP_TYPE_BITS: %ld\n", ms->__SWP_TYPE_BITS); fprintf(fp, " __SWP_TYPE_SHIFT: %ld\n", ms->__SWP_TYPE_SHIFT); fprintf(fp, " __SWP_TYPE_MASK: %lx\n", ms->__SWP_TYPE_MASK); fprintf(fp, " __SWP_OFFSET_BITS: "); if (ms->__SWP_OFFSET_BITS) fprintf(fp, "%ld\n", ms->__SWP_OFFSET_BITS); else fprintf(fp, "(unused)\n"); fprintf(fp, " __SWP_OFFSET_SHIFT: %ld\n", ms->__SWP_OFFSET_SHIFT); fprintf(fp, " 
__SWP_OFFSET_MASK: "); if (ms->__SWP_OFFSET_MASK) fprintf(fp, "%lx\n", ms->__SWP_OFFSET_MASK); else fprintf(fp, "(unused)\n"); fprintf(fp, " crash_kexec_start: %lx\n", ms->crash_kexec_start); fprintf(fp, " crash_kexec_end: %lx\n", ms->crash_kexec_end); fprintf(fp, " crash_save_cpu_start: %lx\n", ms->crash_save_cpu_start); fprintf(fp, " crash_save_cpu_end: %lx\n", ms->crash_save_cpu_end); fprintf(fp, " kernel_flags: %lx\n", ms->kernel_flags); } /* * Parse machine dependent command line arguments. * * Force the phys_offset address via: * * --machdep phys_offset=
*/ static void arm64_parse_cmdline_args(void) { int index, i, c, err; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *p; ulong value = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %x\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { err = 0; if (STRNEQ(arglist[i], "phys_offset=")) { int megabytes = FALSE; int flags = RETURN_ON_ERROR | QUIET; if ((LASTCHAR(arglist[i]) == 'm') || (LASTCHAR(arglist[i]) == 'M')) { LASTCHAR(arglist[i]) = NULLCHAR; megabytes = TRUE; } p = arglist[i] + strlen("phys_offset="); if (strlen(p)) { if (megabytes) value = dtol(p, flags, &err); else value = htol(p, flags, &err); } if (!err) { if (megabytes) value = MEGABYTES(value); machdep->machspec->phys_offset = value; error(NOTE, "setting phys_offset to: 0x%lx\n\n", machdep->machspec->phys_offset); machdep->flags |= PHYS_OFFSET; continue; } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } } } static void arm64_calc_phys_offset(void) { struct machine_specific *ms = machdep->machspec; ulong phys_offset; if (machdep->flags & PHYS_OFFSET) /* --machdep override */ return; /* * Next determine suitable value for phys_offset. User can override this * by passing valid '--machdep phys_offset=' option. */ ms->phys_offset = 0; if (ACTIVE()) { char buf[BUFSIZE]; char *p1; int errflag; FILE *fp; if ((fp = fopen("/proc/iomem", "r")) == NULL) return; /* * Memory regions are sorted in ascending order. We take the * first region which should be correct for most uses. 
*/ errflag = 1; while (fgets(buf, BUFSIZE, fp)) { if (strstr(buf, ": System RAM")) { clean_line(buf); errflag = 0; break; } } fclose(fp); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; *p1 = NULLCHAR; phys_offset = htol(buf, RETURN_ON_ERROR | QUIET, &errflag); if (errflag) return; ms->phys_offset = phys_offset; } else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_offset)) { ms->phys_offset = phys_offset; } else if (KDUMP_DUMPFILE() && arm64_kdump_phys_base(&phys_offset)) { ms->phys_offset = phys_offset; } else { error(WARNING, "phys_offset cannot be determined from the dumpfile.\n"); error(CONT, "Using default value of 0. If this is not correct, then try\n"); error(CONT, "using the command line option: --machdep phys_offset=\n"); } if (CRASHDEBUG(1)) fprintf(fp, "using %lx as phys_offset\n", ms->phys_offset); } /* * Borrow the 32-bit ARM functionality. */ static int arm64_kdump_phys_base(ulong *phys_offset) { return arm_kdump_phys_base(phys_offset); } static void arm64_init_kernel_pgd(void) { int i; ulong value; if (!kernel_symbol_exists("init_mm") || !readmem(symbol_value("init_mm") + OFFSET(mm_struct_pgd), KVADDR, &value, sizeof(void *), "init_mm.pgd", RETURN_ON_ERROR)) { if (kernel_symbol_exists("swapper_pg_dir")) value = symbol_value("swapper_pg_dir"); else { error(WARNING, "cannot determine kernel pgd location\n"); return; } } for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value; } static int arm64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong kernel_pgd; if (!IS_KVADDR(kvaddr)) return FALSE; if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } kernel_pgd = vt->kernel_pgd[0]; *paddr = 0; switch (machdep->flags & (VM_L2_64K|VM_L3_4K)) { case VM_L2_64K: return arm64_vtop_2level_64k(kernel_pgd, kvaddr, paddr, verbose); case VM_L3_4K: return arm64_vtop_3level_4k(kernel_pgd, kvaddr, paddr, verbose); default: 
return FALSE; } } static int arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong user_pgd; readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &user_pgd, sizeof(long), "user pgd", FAULT_ON_ERROR); *paddr = 0; switch (machdep->flags & (VM_L2_64K|VM_L3_4K)) { case VM_L2_64K: return arm64_vtop_2level_64k(user_pgd, uvaddr, paddr, verbose); case VM_L3_4K: return arm64_vtop_3level_4k(user_pgd, uvaddr, paddr, verbose); default: return FALSE; } } #define PMD_TYPE_MASK 3 #define PMD_TYPE_SECT 1 #define PMD_TYPE_TABLE 2 #define SECTION_PAGE_MASK_2MB ((long)(~((MEGABYTES(2))-1))) #define SECTION_PAGE_MASK_512MB ((long)(~((MEGABYTES(512))-1))) static int arm64_vtop_2level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd_base, *pgd_ptr, pgd_val; ulong *pte_base, *pte_ptr, pte_val; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); pgd_base = (ulong *)pgd; FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L2_64K * sizeof(ulong)); pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L2_64K) & (PTRS_PER_PGD_L2_64K - 1)); pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); if (!pgd_val) goto no_page; /* * #define __PAGETABLE_PUD_FOLDED * #define __PAGETABLE_PMD_FOLDED */ if ((pgd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { ulong sectionbase = (pgd_val & SECTION_PAGE_MASK_512MB) & PHYS_MASK; if (verbose) { fprintf(fp, " PAGE: %lx (512MB)\n\n", sectionbase); arm64_translate_pte(pgd_val, 0, 0); } *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_512MB); return TRUE; } pte_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L2_64K * sizeof(ulong)); pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L2_64K - 1)); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); if (!pte_val) goto no_page; if (pte_val & 
PTE_VALID) { *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); arm64_translate_pte(pte_val, 0, 0); } } else { if (IS_UVADDR(vaddr, NULL)) *paddr = pte_val; if (verbose) { fprintf(fp, "\n"); arm64_translate_pte(pte_val, 0, 0); } goto no_page; } return TRUE; no_page: return FALSE; } static int arm64_vtop_3level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd_base, *pgd_ptr, pgd_val; ulong *pmd_base, *pmd_ptr, pmd_val; ulong *pte_base, *pte_ptr, pte_val; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd); pgd_base = (ulong *)pgd; FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L3_4K * sizeof(ulong)); pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_4K) & (PTRS_PER_PGD_L3_4K - 1)); pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); if (!pgd_val) goto no_page; /* * #define __PAGETABLE_PUD_FOLDED */ pmd_base = (ulong *)PTOV(pgd_val & PHYS_MASK & (s32)machdep->pagemask); FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_4K * sizeof(ulong)); pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_4K) & (PTRS_PER_PMD_L3_4K - 1)); pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val); if (!pmd_val) goto no_page; if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) { ulong sectionbase = (pmd_val & SECTION_PAGE_MASK_2MB) & PHYS_MASK; if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", sectionbase); arm64_translate_pte(pmd_val, 0, 0); } *paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_2MB); return TRUE; } pte_base = (ulong *)PTOV(pmd_val & PHYS_MASK & (s32)machdep->pagemask); FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_4K * sizeof(ulong)); pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_4K - 1)); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val); if (!pte_val) 
goto no_page; if (pte_val & PTE_VALID) { *paddr = (PAGEBASE(pte_val) & PHYS_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); arm64_translate_pte(pte_val, 0, 0); } } else { if (IS_UVADDR(vaddr, NULL)) *paddr = pte_val; if (verbose) { fprintf(fp, "\n"); arm64_translate_pte(pte_val, 0, 0); } goto no_page; } return TRUE; no_page: return FALSE; } static ulong arm64_get_task_pgd(ulong task) { struct task_context *tc; ulong pgd; if ((tc = task_to_context(task)) && readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "user pgd", RETURN_ON_ERROR)) return pgd; else return NO_TASK; } static ulong arm64_processor_speed(void) { return 0; }; /* * Gather and verify all of the backtrace requirements. */ static void arm64_stackframe_init(void) { long task_struct_thread; long thread_struct_cpu_context; long context_sp, context_pc, context_fp; struct syment *sp1, *sp1n, *sp2, *sp2n; STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid"); MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); machdep->machspec->__exception_text_start = symbol_value("__exception_text_start"); machdep->machspec->__exception_text_end = symbol_value("__exception_text_end"); if ((sp1 = kernel_symbol_search("crash_kexec")) && (sp1n = next_symbol(NULL, sp1)) && (sp2 = kernel_symbol_search("crash_save_cpu")) && (sp2n = next_symbol(NULL, sp2))) { machdep->machspec->crash_kexec_start = sp1->value; machdep->machspec->crash_kexec_end = sp1n->value; machdep->machspec->crash_save_cpu_start = sp2->value; machdep->machspec->crash_save_cpu_end = sp2n->value; machdep->flags |= KDUMP_ENABLED; } task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); thread_struct_cpu_context = MEMBER_OFFSET("thread_struct", "cpu_context"); if ((task_struct_thread == INVALID_OFFSET) || (thread_struct_cpu_context == INVALID_OFFSET)) { error(INFO, "cannot 
determine task_struct.thread.context offset\n"); return; } /* * Pay for the convenience of using a hardcopy of a kernel structure. */ if (offsetof(struct arm64_stackframe, sp) != MEMBER_OFFSET("stackframe", "sp")) { error(INFO, "builtin stackframe.sp offset incorrect!\n"); return; } if (offsetof(struct arm64_stackframe, fp) != MEMBER_OFFSET("stackframe", "fp")) { error(INFO, "builtin stackframe.fp offset incorrect!\n"); return; } if (offsetof(struct arm64_stackframe, pc) != MEMBER_OFFSET("stackframe", "pc")) { error(INFO, "builtin stackframe.pc offset incorrect!\n"); return; } context_sp = MEMBER_OFFSET("cpu_context", "sp"); context_fp = MEMBER_OFFSET("cpu_context", "fp"); context_pc = MEMBER_OFFSET("cpu_context", "pc"); if (context_sp == INVALID_OFFSET) { error(INFO, "cannot determine cpu_context.sp offset\n"); return; } if (context_fp == INVALID_OFFSET) { error(INFO, "cannot determine cpu_context.fp offset\n"); return; } if (context_pc == INVALID_OFFSET) { error(INFO, "cannot determine cpu_context.pc offset\n"); return; } ASSIGN_OFFSET(task_struct_thread_context_sp) = task_struct_thread + thread_struct_cpu_context + context_sp; ASSIGN_OFFSET(task_struct_thread_context_fp) = task_struct_thread + thread_struct_cpu_context + context_fp; ASSIGN_OFFSET(task_struct_thread_context_pc) = task_struct_thread + thread_struct_cpu_context + context_pc; } #define KERNEL_MODE (1) #define USER_MODE (2) #define USER_EFRAME_OFFSET (304) /* * PSR bits */ #define PSR_MODE_EL0t 0x00000000 #define PSR_MODE_EL1t 0x00000004 #define PSR_MODE_EL1h 0x00000005 #define PSR_MODE_EL2t 0x00000008 #define PSR_MODE_EL2h 0x00000009 #define PSR_MODE_EL3t 0x0000000c #define PSR_MODE_EL3h 0x0000000d #define PSR_MODE_MASK 0x0000000f static int arm64_is_kernel_exception_frame(struct bt_info *bt, ulong stkptr) { struct arm64_pt_regs *regs; regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(stkptr))]; if (INSTACK(regs->sp, bt) && INSTACK(regs->regs[29], bt) && !(regs->pstate & 
(0xffffffff00000000ULL | PSR_MODE32_BIT)) && is_kernel_text(regs->pc) && is_kernel_text(regs->regs[30])) { switch (regs->pstate & PSR_MODE_MASK) { case PSR_MODE_EL1t: case PSR_MODE_EL1h: return TRUE; } } return FALSE; } static int arm64_eframe_search(struct bt_info *bt) { ulong ptr, count; count = 0; for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) { if (arm64_is_kernel_exception_frame(bt, ptr)) { fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", ptr); arm64_print_exception_frame(bt, ptr, KERNEL_MODE, fp); count++; } } if (is_kernel_thread(bt->tc->task)) return count; ptr = bt->stacktop - USER_EFRAME_OFFSET; fprintf(fp, "%sUSER-MODE EXCEPTION FRAME AT: %lx\n", count++ ? "\n" : "", ptr); arm64_print_exception_frame(bt, ptr, USER_MODE, fp); return count; } static int arm64_in_exception_text(ulong ptr) { struct machine_specific *ms = machdep->machspec; return((ptr >= ms->__exception_text_start) && (ptr < ms->__exception_text_end)); } #define BACKTRACE_CONTINUE (1) #define BACKTRACE_COMPLETE_KERNEL (2) #define BACKTRACE_COMPLETE_USER (3) static int arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp) { char *name, *name_plus_offset; ulong symbol_offset; struct syment *sp; struct load_module *lm; char buf[BUFSIZE]; name = closest_symbol(frame->pc); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(frame->pc, &symbol_offset); if (sp && symbol_offset) name_plus_offset = value_to_symstr(frame->pc, buf, bt->radix); } if (bt->flags & BT_FULL) { arm64_display_full_frame(bt, frame->sp); bt->frameptr = frame->sp; } fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, frame->sp, name_plus_offset ? 
name_plus_offset : name, frame->pc); if (BT_REFERENCE_CHECK(bt)) arm64_do_bt_reference_check(bt, frame->pc, name); if (module_symbol(frame->pc, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { get_line_number(frame->pc, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (STREQ(name, "start_kernel") || STREQ(name, "secondary_start_kernel") || STREQ(name, "kthread") || STREQ(name, "kthreadd")) return BACKTRACE_COMPLETE_KERNEL; return BACKTRACE_CONTINUE; } static void arm64_display_full_frame(struct bt_info *bt, ulong sp) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (bt->frameptr == sp) return; if (!INSTACK(sp, bt) || !INSTACK(bt->frameptr, bt)) return; words = (sp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static int arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) { unsigned long high, low, fp; unsigned long stack_mask; stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; fp = frame->fp; low = frame->sp; high = (low + stack_mask) & ~(stack_mask); if (fp < low || fp > high || fp & 0xf) return FALSE; frame->sp = fp + 0x10; frame->fp = GET_STACK_ULONG(fp); frame->pc = GET_STACK_ULONG(fp + 8); return TRUE; } static void arm64_back_trace_cmd(struct bt_info *bt) { struct arm64_stackframe stackframe; int level; ulong exception_frame; FILE *ofp; ofp = BT_REFERENCE_CHECK(bt) ? 
pc->nullfp : fp; /* * stackframes are created from 3 contiguous stack addresses: * * x: contains stackframe.fp -- points to next triplet * x+8: contains stackframe.pc -- text return address * x+16: is the stackframe.sp address */ if (bt->flags & BT_KDUMP_ADJUST) { stackframe.fp = GET_STACK_ULONG(bt->bptr - 8); stackframe.pc = GET_STACK_ULONG(bt->bptr); stackframe.sp = bt->bptr + 8; bt->frameptr = stackframe.sp; } else if (bt->hp && bt->hp->esp) { stackframe.fp = GET_STACK_ULONG(bt->hp->esp - 8); stackframe.pc = bt->hp->eip ? bt->hp->eip : GET_STACK_ULONG(bt->hp->esp); stackframe.sp = bt->hp->esp + 8; bt->flags &= ~BT_REGS_NOT_FOUND; } else { stackframe.sp = bt->stkptr; stackframe.pc = bt->instptr; stackframe.fp = bt->frameptr; } if (bt->flags & BT_TEXT_SYMBOLS) { arm64_print_text_symbols(bt, &stackframe, ofp); if (BT_REFERENCE_FOUND(bt)) { print_task_header(fp, task_to_context(bt->task), 0); arm64_print_text_symbols(bt, &stackframe, fp); fprintf(fp, "\n"); } return; } if (bt->flags & BT_REGS_NOT_FOUND) return; if (!(bt->flags & BT_KDUMP_ADJUST)) { if (bt->flags & BT_USER_SPACE) goto complete_user; if (DUMPFILE() && is_task_active(bt->task)) { exception_frame = stackframe.fp - SIZE(pt_regs); if (arm64_is_kernel_exception_frame(bt, exception_frame)) arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); } } level = exception_frame = 0; while (1) { bt->instptr = stackframe.pc; switch (arm64_print_stackframe_entry(bt, level, &stackframe, ofp)) { case BACKTRACE_COMPLETE_KERNEL: return; case BACKTRACE_COMPLETE_USER: goto complete_user; case BACKTRACE_CONTINUE: break; } if (exception_frame) { arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); exception_frame = 0; } if (!arm64_unwind_frame(bt, &stackframe)) break; if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) exception_frame = stackframe.fp - SIZE(pt_regs); level++; } if (is_kernel_thread(bt->tc->task)) return; complete_user: exception_frame = bt->stacktop - 
USER_EFRAME_OFFSET; arm64_print_exception_frame(bt, exception_frame, USER_MODE, ofp); if ((bt->flags & (BT_USER_SPACE|BT_KDUMP_ADJUST)) == BT_USER_SPACE) fprintf(ofp, " #0 [user space]\n"); } static void arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) { int i; ulong *up; struct load_module *lm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *name; ulong start; if (bt->flags & BT_TEXT_SYMBOLS_ALL) start = bt->stackbase; else { start = frame->sp - 8; fprintf(ofp, "%sSTART: %s at %lx\n", space(VADDR_PRLEN > 8 ? 14 : 6), bt->flags & BT_SYMBOL_OFFSET ? value_to_symstr(frame->pc, buf2, bt->radix) : closest_symbol(frame->pc), frame->pc); } for (i = (start - bt->stackbase)/sizeof(ulong); i < LONGS_PER_STACK; i++) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (is_kernel_text(*up)) { name = closest_symbol(*up); fprintf(ofp, " %s[%s] %s at %lx", bt->flags & BT_ERROR_MASK ? " " : "", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(bt->stackbase + (i * sizeof(long)))), bt->flags & BT_SYMBOL_OFFSET ? 
value_to_symstr(*up, buf2, bt->radix) : name, *up); if (module_symbol(*up, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (BT_REFERENCE_CHECK(bt)) arm64_do_bt_reference_check(bt, *up, name); } } } static int arm64_in_kdump_text(struct bt_info *bt, struct arm64_stackframe *frame) { ulong *ptr, *start, *base; struct machine_specific *ms; if (!(machdep->flags & KDUMP_ENABLED)) return FALSE; base = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stackbase))]; if (bt->flags & BT_USER_SPACE) start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; else { if (INSTACK(frame->fp, bt)) start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(frame->fp))]; else start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; } ms = machdep->machspec; for (ptr = start - 8; ptr >= base; ptr--) { if ((*ptr >= ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + bt->tc->thread_info; if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_kexec)\n", bt->bptr, *ptr); return TRUE; } if ((*ptr >= ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + bt->tc->thread_info; if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_save_cpu)\n", bt->bptr, *ptr); return TRUE; } } return FALSE; } static int arm64_get_dumpfile_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) { struct machine_specific *ms = machdep->machspec; struct arm64_pt_regs *ptregs; if (!ms->panic_task_regs || (!ms->panic_task_regs[bt->tc->processor].sp && !ms->panic_task_regs[bt->tc->processor].pc)) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } ptregs = &ms->panic_task_regs[bt->tc->processor]; frame->sp = ptregs->sp; frame->pc = ptregs->pc; frame->fp = ptregs->regs[29]; if (!is_kernel_text(frame->pc) && in_user_stack(bt->tc->task, frame->sp)) bt->flags |= BT_USER_SPACE; if (arm64_in_kdump_text(bt, frame)) bt->flags |= BT_KDUMP_ADJUST; return TRUE; } static int 
arm64_get_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) { if (!fill_task_struct(bt->task)) return FALSE; frame->sp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_sp)); frame->pc = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_pc)); frame->fp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_fp)); return TRUE; } static void arm64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { int ret; struct arm64_stackframe stackframe = { 0 }; if (DUMPFILE() && is_task_active(bt->task)) ret = arm64_get_dumpfile_stackframe(bt, &stackframe); else ret = arm64_get_stackframe(bt, &stackframe); if (!ret) error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task); bt->frameptr = stackframe.fp; if (pcp) *pcp = stackframe.pc; if (spp) *spp = stackframe.sp; } static void arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *ofp) { int i, r, rows, top_reg, is_64_bit; struct arm64_pt_regs *regs; struct syment *sp; ulong LR, SP, offset; char buf[BUFSIZE]; if (CRASHDEBUG(1)) fprintf(ofp, "pt_regs: %lx\n", pt_regs); regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(pt_regs))]; if ((mode == USER_MODE) && (regs->pstate & PSR_MODE32_BIT)) { LR = regs->regs[14]; SP = regs->regs[13]; top_reg = 12; is_64_bit = FALSE; rows = 4; } else { LR = regs->regs[30]; SP = regs->sp; top_reg = 29; is_64_bit = TRUE; rows = 3; } switch (mode) { case USER_MODE: if (is_64_bit) fprintf(ofp, " PC: %016lx LR: %016lx SP: %016lx\n ", (ulong)regs->pc, LR, SP); else fprintf(ofp, " PC: %08lx LR: %08lx SP: %08lx PSTATE: %08lx\n ", (ulong)regs->pc, LR, SP, (ulong)regs->pstate); break; case KERNEL_MODE: fprintf(ofp, " PC: %016lx ", (ulong)regs->pc); if (is_kernel_text(regs->pc) && (sp = value_search(regs->pc, &offset))) { fprintf(ofp, "[%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? 
"+0x%lx" : "+%ld", offset); fprintf(ofp, "]\n"); } else fprintf(ofp, "[unknown or invalid address]\n"); fprintf(ofp, " LR: %016lx ", LR); if (is_kernel_text(LR) && (sp = value_search(LR, &offset))) { fprintf(ofp, "[%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld", offset); fprintf(ofp, "]\n"); } else fprintf(ofp, "[unknown or invalid address]\n"); fprintf(ofp, " SP: %016lx PSTATE: %08lx\n ", SP, (ulong)regs->pstate); break; } for (i = top_reg, r = 1; i >= 0; r++, i--) { fprintf(ofp, "%sX%d: ", i < 10 ? " " : "", i); fprintf(ofp, is_64_bit ? "%016lx" : "%08lx", (ulong)regs->regs[i]); if ((i == 0) || ((r % rows) == 0)) fprintf(ofp, "\n "); else fprintf(ofp, "%s", is_64_bit ? " " : " "); } if (is_64_bit) { fprintf(ofp, "ORIG_X0: %016lx SYSCALLNO: %lx", (ulong)regs->orig_x0, (ulong)regs->syscallno); if (mode == USER_MODE) fprintf(ofp, " PSTATE: %08lx", (ulong)regs->pstate); fprintf(ofp, "\n"); } if (is_kernel_text(regs->pc) && (bt->flags & BT_LINE_NUMBERS)) { get_line_number(regs->pc, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (BT_REFERENCE_CHECK(bt)) { arm64_do_bt_reference_check(bt, regs->pc, NULL); arm64_do_bt_reference_check(bt, LR, NULL); arm64_do_bt_reference_check(bt, SP, NULL); arm64_do_bt_reference_check(bt, regs->pstate, NULL); for (i = 0; i <= top_reg; i++) arm64_do_bt_reference_check(bt, regs->regs[i], NULL); if (is_64_bit) { arm64_do_bt_reference_check(bt, regs->orig_x0, NULL); arm64_do_bt_reference_check(bt, regs->syscallno, NULL); } } } /* * Check a frame for a requested reference. 
*/
/*
 * Check one backtrace element against the "bt -R" reference target,
 * which may be a symbol name or a hex value; set BT_REF_FOUND in the
 * reference cmdflags on a match.
 */
static void
arm64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name)
{
	ulong offset;
	struct syment *sp = NULL;

	/* Resolve whichever of value/name was not supplied by the caller. */
	if (!name)
		sp = value_search(text, &offset);
	else if (!text)
		sp = symbol_search(name);

	switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL))
	{
	case BT_REF_SYMBOL:
		if (name) {
			if (STREQ(name, bt->ref->str))
				bt->ref->cmdflags |= BT_REF_FOUND;
		} else {
			/* Only an exact (zero-offset) symbol hit counts. */
			if (sp && !offset && STREQ(sp->name, bt->ref->str))
				bt->ref->cmdflags |= BT_REF_FOUND;
		}
		break;

	case BT_REF_HEXVAL:
		if (text) {
			if (bt->ref->hexval == text)
				bt->ref->cmdflags |= BT_REF_FOUND;
		} else if (sp && (bt->ref->hexval == sp->value))
			bt->ref->cmdflags |= BT_REF_FOUND;
		else if (!name && !text && (bt->ref->hexval == 0))
			bt->ref->cmdflags |= BT_REF_FOUND;
		break;
	}
}

/*
 * Translate a PTE, returning TRUE if the page is present.
 * If a physaddr pointer is passed in, don't print anything.
 */
static int
arm64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	int c, others, len1, len2, len3;
	ulong paddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	int page_present;

	/* Physical page frame address encoded in the PTE. */
	paddr = pte & PHYS_MASK & (s32)machdep->pagemask;
	/* PROT_NONE pages are resident even though PTE_VALID is clear. */
	page_present = pte & (PTE_VALID | machdep->machspec->PTE_PROT_NONE);

	/* Quiet mode: just hand back the physical address. */
	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf1, len1, CENTER|LJUST, "PTE"));

	if (!page_present) {
		/* Not present: decode the PTE as a swap entry instead. */
		swap_location(pte, buf1);
		if ((c = parse_line(buf1, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf1, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));
	fprintf(fp, "(");
	others = 0;

	/* Print each attribute bit that is set, "|"-separated. */
	if (pte) {
		if (pte & PTE_VALID)
			fprintf(fp, "%sVALID", others++ ? "|" : "");
		if (pte & machdep->machspec->PTE_FILE)
			fprintf(fp, "%sFILE", others++ ? "|" : "");
		if (pte & machdep->machspec->PTE_PROT_NONE)
			fprintf(fp, "%sPROT_NONE", others++ ? "|" : "");
		if (pte & PTE_USER)
			fprintf(fp, "%sUSER", others++ ? "|" : "");
		if (pte & PTE_RDONLY)
			fprintf(fp, "%sRDONLY", others++ ? "|" : "");
		if (pte & PTE_SHARED)
			fprintf(fp, "%sSHARED", others++ ? "|" : "");
		if (pte & PTE_AF)
			fprintf(fp, "%sAF", others++ ? "|" : "");
		if (pte & PTE_NG)
			fprintf(fp, "%sNG", others++ ? "|" : "");
		if (pte & PTE_PXN)
			fprintf(fp, "%sPXN", others++ ? "|" : "");
		if (pte & PTE_UXN)
			fprintf(fp, "%sUXN", others++ ? "|" : "");
		if (pte & PTE_DIRTY)
			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
		if (pte & PTE_SPECIAL)
			fprintf(fp, "%sSPECIAL", others++ ? "|" : "");
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return (page_present);
}

/* Lowest address of the arm64 vmalloc region. */
static ulong
arm64_vmalloc_start(void)
{
	return machdep->machspec->vmalloc_start_addr;
}

/*
 * Not so accurate since thread_info introduction.
*/
static int
arm64_is_task_addr(ulong task)
{
	/*
	 * With THREAD_INFO any kernel virtual address is acceptable;
	 * otherwise a task must sit on an aligned stack boundary.
	 */
	if (tt->flags & THREAD_INFO)
		return IS_KVADDR(task);
	else
		return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
}

/*
 * Filter dissassembly output if the output radix is not gdb's default 10
 */
static int
arm64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	/* A NULL inbuf is a query call; report that filtering is wanted. */
	if (!inbuf)
		return TRUE;

	console("IN: %s", inbuf);

	/* Rewrite gdb's "address:" prefix as "0x<addr> <symbol>". */
	colon = strstr(inbuf, ":");

	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	/*
	 * If the line ends with a "<symbol>" operand, re-express the
	 * hex target address preceding it in the requested radix.
	 */
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) &&
		    !(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x")))
			p1--;

		if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x")))
			return FALSE;
		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));

		sprintf(p1, "%s", buf1);
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 * Machine dependent command.
*/ static void arm64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != -1) { switch (c) { case 'c': case 'm': option_not_supported(c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); arm64_display_machine_stats(); } static void arm64_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); if ((mhz = machdep->processor_speed())) fprintf(fp, " PROCESSOR SPEED: %ld Mhz\n", mhz); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static int arm64_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_present())) return cpus; else return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Retrieve task registers for the time of the crash. */ static int arm64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf64_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. 
*/ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* * Add __per_cpu_offset for each cpu to form the notes pointer. */ for (i = 0; icpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "failed to read note_buf_t\n"); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid note (n_type != NT_PRSTATUS)\n"); goto fail; } if (p[0] != 'C' || p[1] != 'O' || p[2] != 'R' || p[3] != 'E') { error(WARNING, "invalid note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. 
*/ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], sizeof(struct arm64_pt_regs)); } FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(ms->panic_task_regs); ms->panic_task_regs = NULL; return FALSE; } static void arm64_clear_machdep_cache(void) { /* * TBD: probably not necessary... */ return; } static int arm64_in_alternate_stack(int cpu, ulong stkptr) { NOT_IMPLEMENTED(INFO); return FALSE; } static int compare_kvaddr(const void *v1, const void *v2) { struct vaddr_range *r1, *r2; r1 = (struct vaddr_range *)v1; r2 = (struct vaddr_range *)v2; return (r1->start < r2->start ? -1 : r1->start == r2->start ? 0 : 1); } static int arm64_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->machspec->page_offset; vrp[cnt++].end = vt->high_memory; vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = machdep->machspec->vmalloc_start_addr; vrp[cnt++].end = last_vmalloc_address(); if (st->mods_installed) { vrp[cnt].type = KVADDR_MODULES; vrp[cnt].start = lowest_module_address(); vrp[cnt++].end = roundup(highest_module_address(), PAGESIZE()); } if (machdep->flags & VMEMMAP) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = machdep->machspec->vmemmap_vaddr; vrp[cnt++].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } /* * Include both vmalloc'd, module and vmemmap address space as VMALLOC space. 
*/ int arm64_IS_VMALLOC_ADDR(ulong vaddr) { struct machine_specific *ms = machdep->machspec; return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) || ((machdep->flags & VMEMMAP) && (vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end)) || (vaddr >= ms->modules_vaddr && vaddr <= ms->modules_end)); } static void arm64_calc_VA_BITS(void) { int bitval; struct syment *sp; ulong value; if (!(sp = symbol_search("swapper_pg_dir")) && !(sp = symbol_search("idmap_pg_dir")) && !(sp = symbol_search("_text")) && !(sp = symbol_search("stext"))) { for (sp = st->symtable; sp < st->symend; sp++) { if (highest_bit_long(sp->value) == 63) break; } } if (sp) value = sp->value; else value = kt->vmcoreinfo.log_buf_SYMBOL; /* crash --log */ for (bitval = highest_bit_long(value); bitval; bitval--) { if ((value & (1UL << bitval)) == 0) { machdep->machspec->VA_BITS = bitval + 2; break; } } if (CRASHDEBUG(1)) fprintf(fp, "VA_BITS: %ld\n", machdep->machspec->VA_BITS); } /* * The size and end of the vmalloc range is dependent upon the kernel's * VMEMMAP_SIZE value, and the vmemmap range is dependent upon the end * of the vmalloc range as well as the VMEMMAP_SIZE: * * #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) * #define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) * #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) * * Since VMEMMAP_SIZE is dependent upon the size of a struct page, * the two ranges cannot be determined until POST_GDB. 
*/ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) #define SZ_64K 0x00010000 static void arm64_calc_virtual_memory_ranges(void) { struct machine_specific *ms = machdep->machspec; ulong vmemmap_start, vmemmap_end, vmemmap_size; ulong vmalloc_end; ulong PUD_SIZE = UNINITIALIZED; if (THIS_KERNEL_VERSION < LINUX(3,17,0)) /* use original hardwired values */ return; STRUCT_SIZE_INIT(page, "page"); switch (machdep->flags & (VM_L2_64K|VM_L3_4K)) { case VM_L2_64K: PUD_SIZE = PGDIR_SIZE_L2_64K; break; case VM_L3_4K: PUD_SIZE = PGDIR_SIZE_L3_4K; break; } vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE); vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K); vmemmap_start = vmalloc_end + SZ_64K; vmemmap_end = vmemmap_start + vmemmap_size; ms->vmalloc_end = vmalloc_end - 1; ms->vmemmap_vaddr = vmemmap_start; ms->vmemmap_end = vmemmap_end - 1; } static int arm64_is_uvaddr(ulong addr, struct task_context *tc) { return (addr < ARM64_USERSPACE_TOP); } ulong arm64_swp_type(ulong pte) { struct machine_specific *ms = machdep->machspec; pte >>= ms->__SWP_TYPE_SHIFT; pte &= ms->__SWP_TYPE_MASK; return pte; } ulong arm64_swp_offset(ulong pte) { struct machine_specific *ms = machdep->machspec; pte >>= ms->__SWP_OFFSET_SHIFT; if (ms->__SWP_OFFSET_MASK) pte &= ms->__SWP_OFFSET_MASK; return pte; } #endif /* ARM64 */ crash-7.1.4/ramdump.c0000664000000000000000000002076112634305150013141 0ustar rootroot/* * ramdump.c - core analysis suite * * Copyright (c) 2014 Broadcom Corporation * Oza Pawandeep * Vikram Prakash * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Oza Pawandeep */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include struct ramdump_def { char *path; int rfd; ulonglong start_paddr; ulonglong end_paddr; }; static struct ramdump_def *ramdump; static int nodes; static char *user_elf = NULL; static char elf_default[] = "/var/tmp/ramdump_elf_XXXXXX"; static void alloc_elf_header(Elf64_Ehdr *ehdr, ushort e_machine) { memcpy(ehdr->e_ident, ELFMAG, SELFMAG); ehdr->e_ident[EI_CLASS] = ELFCLASS64; ehdr->e_ident[EI_DATA] = ELFDATA2LSB; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_LINUX; ehdr->e_ident[EI_ABIVERSION] = 0; memset(ehdr->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); ehdr->e_type = ET_CORE; ehdr->e_machine = e_machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_shoff = 0; ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); ehdr->e_phnum = 1 + nodes; ehdr->e_shentsize = 0; ehdr->e_shnum = 0; ehdr->e_shstrndx = 0; } static int alloc_program_headers(Elf64_Phdr *phdr) { unsigned int i; struct stat64 st; for (i = 0; i < nodes; i++) { phdr[i].p_type = PT_LOAD; if (0 > stat64(ramdump[i].path, &st)) { error(INFO, "ramdump stat failed\n"); return -1; } phdr[i].p_filesz = st.st_size; phdr[i].p_memsz = phdr[i].p_filesz; phdr[i].p_vaddr = 0; phdr[i].p_paddr = ramdump[i].start_paddr; ramdump[i].end_paddr = ramdump[i].start_paddr + st.st_size - 1; phdr[i].p_flags = PF_R | PF_W | PF_X; phdr[i].p_align = 0; } return 0; } static char *write_elf(Elf64_Phdr *load, Elf64_Ehdr *e_head, size_t data_offset) { #define CPY_BUF_SZ 4096 int fd1, fd2, i, err = 1; char *buf; char *out_elf; size_t offset; ssize_t rd, len; buf = (char *)malloc(CPY_BUF_SZ); 
offset = data_offset; if (user_elf) { fd2 = open(user_elf, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); if (fd2 < 0) { error(INFO, "%s open error, %s\n", user_elf, strerror(errno)); goto end1; } out_elf = user_elf; } else { fd2 = mkstemp(elf_default); if (fd2 < 0) { error(INFO, "%s open error, %s\n", elf_default, strerror(errno)); goto end1; } out_elf = elf_default; pc->flags2 |= RAMDUMP; } if (user_elf) { sprintf(buf, "creating ELF dumpfile: %s", out_elf); please_wait(buf); } else if (CRASHDEBUG(1)) fprintf(fp, "creating temporary ELF header: %s\n\n", elf_default); while (offset > 0) { len = write(fd2, e_head + (data_offset - offset), offset); if (len < 0) { error(INFO, "ramdump write error, %s\n", strerror(errno)); goto end; } offset -= len; } if (user_elf) { for (i = 0; i < nodes; i++) { offset = load[i].p_offset; fd1 = open(ramdump[i].path, O_RDONLY, S_IRUSR); if (fd1 < 0) { error(INFO, "%s open error, %s\n", ramdump[i].path, strerror(errno)); goto end; } lseek(fd2, (off_t)offset, SEEK_SET); while ((rd = read(fd1, buf, CPY_BUF_SZ)) > 0) { if (write(fd2, buf, rd) != rd) { error(INFO, "%s write error, %s\n", ramdump[i].path, strerror(errno)); close(fd1); goto end; } } close(fd1); } please_wait_done(); } err = 0; end: close(fd2); end1: free(buf); return err ? 
NULL : out_elf; } static void alloc_notes(Elf64_Phdr *notes) { /* Nothing filled in as of now */ notes->p_type = PT_NOTE; notes->p_offset = 0; notes->p_vaddr = 0; notes->p_paddr = 0; notes->p_filesz = 0; notes->p_memsz = 0; notes->p_flags = 0; notes->p_align = 0; } char *ramdump_to_elf(void) { int i; char *ptr, *e_file = NULL; ushort e_machine = 0; size_t offset, data_offset; size_t l_offset; Elf64_Phdr *notes, *load; Elf64_Ehdr *e_head; if (machine_type("ARM")) e_machine = EM_ARM; else if (machine_type("ARM64")) e_machine = EM_AARCH64; else if (machine_type("MIPS")) e_machine = EM_MIPS; else error(FATAL, "ramdump: unsupported machine type: %s\n", MACHINE_TYPE); e_head = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) + (nodes * sizeof(Elf64_Phdr)) + (CPY_BUF_SZ * 2)); ptr = (char *)e_head; offset = 0; alloc_elf_header(e_head, e_machine); ptr += sizeof(Elf64_Ehdr); offset += sizeof(Elf64_Ehdr); notes = (Elf64_Phdr *)ptr; alloc_notes(notes); offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); load = (Elf64_Phdr *)ptr; if (alloc_program_headers(load)) goto end; offset += sizeof(Elf64_Phdr) * nodes; ptr += sizeof(Elf64_Phdr) * nodes; /* Empty note */ notes->p_offset = offset; l_offset = offset; data_offset = offset; for (i = 0; i < nodes; i++) { load[i].p_offset = l_offset; l_offset += load[i].p_filesz; } e_file = write_elf(load, e_head, data_offset); end: free(e_head); return e_file; } int is_ramdump(char *p) { char *x = NULL, *y = NULL, *pat; size_t len; char *pattern; int err = 0; if (nodes || !strchr(p, '@')) return 0; len = strlen(p); pattern = (char *)malloc(len + 1); strlcpy(pattern, p, len + 1); pat = pattern; while ((pat = strtok_r(pat, ",", &x))) { if ((pat = strtok_r(pat, "@", &y))) { nodes++; ramdump = realloc(ramdump, sizeof(struct ramdump_def) * nodes); if (!ramdump) error(FATAL, "realloc failure\n"); ramdump[nodes - 1].path = pat; pat = strtok_r(NULL, "@", &y); ramdump[nodes - 1].start_paddr = htoll(pat, RETURN_ON_ERROR, &err); if (err 
== TRUE) error(FATAL, "Invalid ramdump address\n"); if ((ramdump[nodes - 1].rfd = open(ramdump[nodes - 1].path, O_RDONLY)) < 0) error(FATAL, "ramdump %s open failed:%s\n", ramdump[nodes - 1].path, strerror(errno)); } pat = NULL; } return nodes; } void ramdump_elf_output_file(char *opt) { user_elf = opt; } void ramdump_cleanup(void) { if (!user_elf) unlink(elf_default); } int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; int i, found; struct ramdump_def *r = &ramdump[0]; offset = 0; for (i = found = 0; i < nodes; i++) { r = &ramdump[i]; if ((paddr >= r->start_paddr) && (paddr <= r->end_paddr)) { offset = (off_t)paddr - (off_t)r->start_paddr; found++; break; } } if (!found) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset not found for paddr: %llx\n", (ulonglong)paddr); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n", addr, (ulonglong)paddr, cnt, (ulonglong)offset); if (lseek(r->rfd, offset, SEEK_SET) == -1) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: SEEK_ERROR: " "offset: %llx\n", (ulonglong)offset); return SEEK_ERROR; } if (read(r->rfd, bufptr, cnt) != cnt) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset: %llx\n", (ulonglong)offset); return READ_ERROR; } return cnt; } void show_ramdump_files(void) { int i; fprintf(fp, "%s [temporary ELF header]\n", elf_default); for (i = 0; i < nodes; i++) { fprintf(fp, "%s %s", i ? "\n" : "", ramdump[i].path); } } void dump_ramdump_data() { int i; if (!user_elf && !is_ramdump_image()) return; fprintf(fp, "\nramdump data:\n"); fprintf(fp, " user_elf: %s\n", user_elf ? user_elf : "(unused)"); fprintf(fp, " elf_default: %s\n", user_elf ? 
"(unused)" : elf_default); fprintf(fp, " nodes: %d\n", nodes); for (i = 0; i < nodes; i++) { fprintf(fp, " ramdump[%d]:\n", i); fprintf(fp, " path: %s\n", ramdump[i].path); fprintf(fp, " rfd: %d\n", ramdump[i].rfd); fprintf(fp, " start_paddr: %llx\n", (ulonglong)ramdump[i].start_paddr); fprintf(fp, " end_paddr: %llx\n", (ulonglong)ramdump[i].end_paddr); } fprintf(fp, "\n"); } int is_ramdump_image(void) { return (pc->flags2 & RAMDUMP ? TRUE : FALSE); } crash-7.1.4/gdb_interface.c0000775000000000000000000006550512634305150014260 0ustar rootroot/* gdb_interface.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" static void exit_after_gdb_info(void); static int is_restricted_command(char *, ulong); static void strip_redirection(char *); int get_frame_offset(ulong); int *gdb_output_format; unsigned int *gdb_print_max; int *gdb_prettyprint_structs; int *gdb_prettyprint_arrays; int *gdb_repeat_count_threshold; int *gdb_stop_print_at_null; unsigned int *gdb_output_radix; static ulong gdb_user_print_option_address(char *); /* * Called from main() this routine sets up the call-back hook such that * gdb's main() routine -- renamed gdb_main() -- will call back to * our main_loop() after gdb initializes. 
*/ void gdb_main_loop(int argc, char **argv) { argc = 1; if (pc->flags & SILENT) { if (pc->flags & READNOW) argv[argc++] = "--readnow"; argv[argc++] = "--quiet"; argv[argc++] = pc->namelist_debug ? pc->namelist_debug : (pc->debuginfo_file && (st->flags & CRC_MATCHES) ? pc->debuginfo_file : pc->namelist); } else { if (pc->flags & READNOW) argv[argc++] = "--readnow"; argv[argc++] = pc->namelist_debug ? pc->namelist_debug : (pc->debuginfo_file && (st->flags & CRC_MATCHES) ? pc->debuginfo_file : pc->namelist); } if (CRASHDEBUG(1)) { int i; fprintf(fp, "gdb "); for (i = 1; i < argc; i++) fprintf(fp, "%s ", argv[i]); fprintf(fp, "\n"); } optind = 0; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = main_loop; #else deprecated_command_loop_hook = main_loop; #endif gdb_main_entry(argc, argv); } /* * Update any hooks that gdb has set. */ void update_gdb_hooks(void) { #if defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = pc->flags & VERSION_QUERY ? exit_after_gdb_info : main_loop; target_new_objfile_hook = NULL; #endif #if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) deprecated_command_loop_hook = pc->flags & VERSION_QUERY ? exit_after_gdb_info : main_loop; #endif } void gdb_readnow_warning(void) { if ((THIS_GCC_VERSION >= GCC(3,4,0)) && (THIS_GCC_VERSION < GCC(4,0,0)) && !(pc->flags & READNOW)) { fprintf(stderr, "WARNING: Because this kernel was compiled with gcc version %d.%d.%d, certain\n" " commands or command options may fail unless crash is invoked with\n" " the \"--readnow\" command line option.\n\n", kt->gcc_version[0], kt->gcc_version[1], kt->gcc_version[2]); } } /* * Used only by the -v command line option, get gdb to initialize itself * with no arguments, print its version and GPL paragraph, and then call * back to exit_after_gdb_info(). 
*/ void display_gdb_banner(void) { optind = 0; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = exit_after_gdb_info; #else deprecated_command_loop_hook = exit_after_gdb_info; #endif args[0] = "gdb"; args[1] = "-version"; gdb_main_entry(2, args); } static void exit_after_gdb_info(void) { fprintf(fp, "\n"); clean_exit(0); } /* * Stash a copy of the gdb version locally. This can be called before * gdb gets initialized, so bypass gdb_interface(). */ void get_gdb_version(void) { struct gnu_request request; if (!pc->gdb_version) { request.command = GNU_VERSION; gdb_command_funnel(&request); /* bypass gdb_interface() */ pc->gdb_version = request.buf; } } void gdb_session_init(void) { struct gnu_request *req; int debug_data_pulled_in; if (!have_partial_symbols() && !have_full_symbols()) no_debugging_data(FATAL); /* * Restore the SIGINT and SIGPIPE handlers, which got temporarily * re-assigned by gdb. The SIGINT call also initializes GDB's * SIGINT sigaction. */ SIGACTION(SIGINT, restart, &pc->sigaction, &pc->gdb_sigaction); SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL); if (!(pc->flags & DROP_CORE)) SIGACTION(SIGSEGV, restart, &pc->sigaction, NULL); /* * Set up pointers to gdb variables. 
*/ #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) gdb_output_format = &output_format; gdb_print_max = &print_max; gdb_prettyprint_structs = &prettyprint_structs; gdb_prettyprint_arrays = &prettyprint_arrays; gdb_repeat_count_threshold = &repeat_count_threshold; gdb_stop_print_at_null = &stop_print_at_null; gdb_output_radix = &output_radix; #else gdb_output_format = (int *) gdb_user_print_option_address("output_format"); gdb_print_max = (unsigned int *) gdb_user_print_option_address("print_max"); gdb_prettyprint_structs = (int *) gdb_user_print_option_address("prettyprint_structs"); gdb_prettyprint_arrays = (int *) gdb_user_print_option_address("prettyprint_arrays"); gdb_repeat_count_threshold = (int *) gdb_user_print_option_address("repeat_count_threshold"); gdb_stop_print_at_null = (int *) gdb_user_print_option_address("stop_print_at_null"); gdb_output_radix = (unsigned int *) gdb_user_print_option_address("output_radix"); #endif /* * If the output radix is set via the --hex or --dec command line * option, then pc->output_radix will be non-zero; otherwise use * the gdb default. */ if (pc->output_radix) { *gdb_output_radix = pc->output_radix; *gdb_output_format = (*gdb_output_radix == 10) ? 0 : 'x'; } switch (*gdb_output_radix) { case 10: case 16: pc->output_radix = *gdb_output_radix; break; default: pc->output_radix = *gdb_output_radix = 10; *gdb_output_format = 0; } *gdb_prettyprint_structs = 1; *gdb_repeat_count_threshold = 0x7fffffff; *gdb_print_max = 256; #ifdef GDB_5_3 gdb_disassemble_from_exec = 0; #endif pc->flags |= GDB_INIT; /* set here so gdb_interface will work */ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->buf = GETBUF(BUFSIZE); /* * Make sure the namelist has symbolic data. Later versions of * gcc may require that debug data be pulled in by printing a * static kernel data structure. */ debug_data_pulled_in = FALSE; retry: BZERO(req->buf, BUFSIZE); req->command = GNU_GET_DATATYPE; req->name = XEN_HYPER_MODE() ? 
"page_info" : "task_struct"; req->flags = GNU_RETURN_ON_ERROR; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) { if (XEN_HYPER_MODE()) no_debugging_data(WARNING); /* just bail out */ if (!debug_data_pulled_in) { if (CRASHDEBUG(1)) error(INFO, "gdb_session_init: pulling in debug data by accessing init_mm.mmap %s\n", symbol_exists("sysfs_mount") ? "and syfs_mount" : ""); debug_data_pulled_in = TRUE; req->command = GNU_PASS_THROUGH; req->flags = GNU_RETURN_ON_ERROR|GNU_NO_READMEM; req->name = NULL; if (symbol_exists("sysfs_mount")) sprintf(req->buf, "print sysfs_mount, init_mm.mmap"); else sprintf(req->buf, "print init_mm.mmap"); gdb_interface(req); if (!(req->flags & GNU_COMMAND_FAILED)) goto retry; } no_debugging_data(WARNING); } if (pc->flags & KERNEL_DEBUG_QUERY) { fprintf(fp, "\n%s: %s: contains debugging data\n\n", pc->program_name, pc->namelist); if (REMOTE()) remote_exit(); clean_exit(0); } /* * Set up any pre-ordained gdb settings here that can't be * accessed directly. */ req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set height 0"); gdb_interface(req); req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set width 0"); gdb_interface(req); /* * Patch gdb's symbol values with the correct values from either * the System.map or non-debug vmlinux, whichever is in effect. */ if ((pc->flags & SYSMAP) || (kt->flags & (RELOC_SET|RELOC_FORCE)) || (pc->namelist_debug && !pc->debuginfo_file)) { req->command = GNU_PATCH_SYMBOL_VALUES; req->flags = GNU_RETURN_ON_ERROR; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) error(FATAL, "patching of gdb symbol values failed\n"); } else if (!(pc->flags & SILENT)) fprintf(fp, "\n"); FREEBUF(req->buf); FREEBUF(req); } /* * Quickest way to gdb -- just pass a command string to pass through. 
 */
int
gdb_pass_through(char *cmd, FILE *fptr, ulong flags)
{
	struct gnu_request *req;
	int retval;

	if (CRASHDEBUG(1))
		console("gdb_pass_through: [%s]\n", cmd);

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->buf = cmd;
	if (fptr)
		req->fp = fptr;
	req->command = GNU_PASS_THROUGH;
	req->flags = flags;

	gdb_interface(req);

	/* FALSE only when the caller asked for errors back and one occurred. */
	if ((req->flags &
	    (GNU_RETURN_ON_ERROR|GNU_COMMAND_FAILED)) ==
	    (GNU_RETURN_ON_ERROR|GNU_COMMAND_FAILED))
		retval = FALSE;
	else
		retval = TRUE;

	FREEBUF(req);

	return retval;
}

/*
 *  General purpose routine for passing commands to gdb.  All gdb commands
 *  come through here, where they are passed to gdb_command_funnel().
 *
 *  The sequence below is order-sensitive: the error hook and setjmp
 *  target must be armed before signal dispositions are swapped and
 *  IN_GDB is raised, and all of it must be unwound on the way out
 *  (including via the setjmp error path).
 */
void
gdb_interface(struct gnu_request *req)
{
	if (!(pc->flags & GDB_INIT))
		error(FATAL, "gdb_interface: gdb not initialized?\n");

	if (output_closed())
		restart(0);

	/* Default output stream: real fp at runtime/debug, else /dev/null. */
	if (!req->fp) {
		req->fp = ((pc->flags & RUNTIME) || (pc->flags2 & ALLOW_FP)) ?
			fp : CRASHDEBUG(1) ? fp : pc->nullfp;
	}

	pc->cur_req = req;
	pc->cur_gdb_cmd = req->command;

	if (req->flags & GNU_RETURN_ON_ERROR) {
		/* gdb errors longjmp back here via gdb_error_hook(). */
		error_hook = gdb_error_hook;
		if (setjmp(pc->gdb_interface_env)) {
			pc->last_gdb_cmd = pc->cur_gdb_cmd;
			pc->cur_gdb_cmd = 0;
			pc->cur_req = NULL;
			req->flags |= GNU_COMMAND_FAILED;
			pc->flags &= ~IN_GDB;
			return;
		}
	}
	else
		error_hook = NULL;

	if (CRASHDEBUG(2))
		dump_gnu_request(req, IN_GDB);

	if (!(pc->flags & DROP_CORE))
		SIGACTION(SIGSEGV, restart, &pc->sigaction, NULL);
	else
		SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL);

	/* While inside gdb, let gdb's own SIGINT handler run if allowed. */
	if (interruptible()) {
		SIGACTION(SIGINT, pc->gdb_sigaction.sa_handler,
			&pc->gdb_sigaction, NULL);
	} else {
		SIGACTION(SIGINT, SIG_IGN, &pc->sigaction, NULL);
		SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL);
	}

	pc->flags |= IN_GDB;
	gdb_command_funnel(req);
	pc->flags &= ~IN_GDB;

	SIGACTION(SIGINT, restart, &pc->sigaction, NULL);
	SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL);

	if (CRASHDEBUG(2))
		dump_gnu_request(req, !IN_GDB);

	error_hook = NULL;

	pc->last_gdb_cmd = pc->cur_gdb_cmd;
	pc->cur_gdb_cmd = 0;
	pc->cur_req = NULL;
}

/*
 *  help -g output: dump the current gdb user-print option settings.
 */
void
dump_gdb_data(void)
{
	fprintf(fp, " prettyprint_arrays: %d\n", *gdb_prettyprint_arrays);
	fprintf(fp, " prettyprint_structs: %d\n", *gdb_prettyprint_structs);
	fprintf(fp, "repeat_count_threshold: %x\n", *gdb_repeat_count_threshold);
	fprintf(fp, " stop_print_at_null: %d\n", *gdb_stop_print_at_null);
	fprintf(fp, " print_max: %d\n", *gdb_print_max);
	fprintf(fp, " output_radix: %d\n", *gdb_output_radix);
	fprintf(fp, " output_format: ");
	switch (*gdb_output_format)
	{
	case 'x':
		fprintf(fp, "hex\n");
		break;
	case 'o':
		fprintf(fp, "octal\n");
		break;
	case 0:
		fprintf(fp, "decimal\n");
		break;
	}
}

/*
 *  Debug dump of a gnu_request, sent to the console device only.
 *  in_gdb distinguishes the pre-call ("GDB IN:") from the post-call
 *  ("GDB OUT:") snapshot.
 */
void
dump_gnu_request(struct gnu_request *req, int in_gdb)
{
	int others;
	char buf[BUFSIZE];

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	console("%scommand: %d (%s)\n", in_gdb ? "GDB IN: " : "GDB OUT: ",
		req->command, gdb_command_string(req->command, buf, TRUE));
	console("buf: %lx ", req->buf);
	if (req->buf && ascii_string(req->buf))
		console(" \"%s\"", req->buf);
	console("\n");
	console("fp: %lx ", req->fp);
	/* Identify which of crash's well-known streams fp refers to. */
	if (req->fp == pc->nullfp)
		console("(pc->nullfp) ");
	if (req->fp == pc->stdpipe)
		console("(pc->stdpipe) ");
	if (req->fp == pc->pipe)
		console("(pc->pipe) ");
	if (req->fp == pc->ofile)
		console("(pc->ofile) ");
	if (req->fp == pc->ifile)
		console("(pc->ifile) ");
	if (req->fp == pc->ifile_pipe)
		console("(pc->ifile_pipe) ");
	if (req->fp == pc->ifile_ofile)
		console("(pc->ifile_ofile) ");
	if (req->fp == pc->tmpfile)
		console("(pc->tmpfile) ");
	if (req->fp == pc->saved_fp)
		console("(pc->saved_fp) ");
	if (req->fp == pc->tmp_fp)
		console("(pc->tmp_fp) ");
	console("flags: %lx (", req->flags);
	others = 0;
	if (req->flags & GNU_PRINT_LINE_NUMBERS)
		console("%sGNU_PRINT_LINE_NUMBERS", others++ ? "|" : "");
	if (req->flags & GNU_FUNCTION_ONLY)
		console("%sGNU_FUNCTION_ONLY", others++ ? "|" : "");
	if (req->flags & GNU_PRINT_ENUMERATORS)
		console("%sGNU_PRINT_ENUMERATORS", others++ ? "|" : "");
	if (req->flags & GNU_RETURN_ON_ERROR)
		console("%sGNU_RETURN_ON_ERROR", others++ ? "|" : "");
	if (req->flags & GNU_FROM_TTY_OFF)
		console("%sGNU_FROM_TTY_OFF", others++ ? "|" : "");
	if (req->flags & GNU_NO_READMEM)
		console("%sGNU_NO_READMEM", others++ ? "|" : "");
	if (req->flags & GNU_VAR_LENGTH_TYPECODE)
		console("%sGNU_VAR_LENGTH_TYPECODE", others++ ? "|" : "");
	console(")\n");
	console("addr: %lx ", req->addr);
	console("addr2: %lx ", req->addr2);
	console("count: %ld\n", req->count);
	/* name may hold a symbol-patching sentinel rather than a string. */
	if ((ulong)req->name > (ulong)PATCH_KERNEL_SYMBOLS_STOP)
		console("name: \"%s\" ", req->name);
	else
		console("name: %lx ", (ulong)req->name);
	console("length: %ld ", req->length);
	console("typecode: %d\n", req->typecode);
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0)
	console("typename: %s\n", req->typename);
#else
	console("type_name: %s\n", req->type_name);
#endif
	console("target_typename: %s\n", req->target_typename);
	console("target_length: %ld ", req->target_length);
	console("target_typecode: %d ", req->target_typecode);
	console("is_typedef: %d ", req->is_typedef);
	console("member: \"%s\" ", req->member);
	console("member_offset: %ld\n", req->member_offset);
	console("member_length: %ld\n", req->member_length);
	console("member_typecode: %d\n", req->member_typecode);
	console("value: %lx ", req->value);
	console("tagname: \"%s\" ", req->tagname);
	console("pc: %lx ", req->pc);
	if (is_kernel_text(req->pc))
		console("(%s)", value_to_symstr(req->pc, buf, 0));
	console("\n");
	console("sp: %lx ", req->sp);
	console("ra: %lx ", req->ra);
	console("frame: %ld ", req->frame);
	console("prevsp: %lx\n", req->prevsp);
	console("prevpc: %lx ", req->prevpc);
	console("lastsp: %lx ", req->lastsp);
	console("task: %lx ", req->task);
	console("debug: %lx\n", req->debug);
	console("\n");
}

/*
 *  Return the symbolic name of a GNU_* request command code in buf.
 *  When "live" is set, GNU_GET_DATATYPE also shows the current
 *  request's target name.
 */
char *
gdb_command_string(int cmd, char *buf, int live)
{
	switch (cmd)
	{
	case GNU_PASS_THROUGH:
		sprintf(buf, "GNU_PASS_THROUGH");
		break;
	case GNU_DATATYPE_INIT:
		sprintf(buf, "GNU_DATATYPE_INIT");
		break;
	case GNU_DISASSEMBLE:
		sprintf(buf, "GNU_DISASSEMBLE");
		break;
	case GNU_GET_LINE_NUMBER:
		sprintf(buf, "GNU_GET_LINE_NUMBER");
		break;
	case GNU_GET_DATATYPE:
		if (live)
			sprintf(buf, "GNU_GET_DATATYPE[%s]",
				pc->cur_req->name ? pc->cur_req->name : "?");
		else
			sprintf(buf, "GNU_GET_DATATYPE");
		break;
	case GNU_STACK_TRACE:
		sprintf(buf, "GNU_STACK_TRACE");
		break;
	case GNU_ALPHA_FRAME_OFFSET:
		sprintf(buf, "GNU_ALPHA_FRAME_OFFSET");
		break;
	case GNU_COMMAND_EXISTS:
		sprintf(buf, "GNU_COMMAND_EXISTS");
		break;
	case GNU_FUNCTION_NUMARGS:
		sprintf(buf, "GNU_FUNCTION_NUMARGS");
		break;
	case GNU_RESOLVE_TEXT_ADDR:
		sprintf(buf, "GNU_RESOLVE_TEXT_ADDR");
		break;
	case GNU_DEBUG_COMMAND:
		sprintf(buf, "GNU_DEBUG_COMMAND");
		break;
	case GNU_ADD_SYMBOL_FILE:
		sprintf(buf, "GNU_ADD_SYMBOL_FILE");
		break;
	case GNU_DELETE_SYMBOL_FILE:
		sprintf(buf, "GNU_DELETE_SYMBOL_FILE");
		break;
	case GNU_VERSION:
		sprintf(buf, "GNU_VERSION");
		break;
	case GNU_GET_SYMBOL_TYPE:
		sprintf(buf, "GNU_GET_SYMBOL_TYPE");
		break;
	case GNU_PATCH_SYMBOL_VALUES:
		sprintf(buf, "GNU_PATCH_SYMBOL_VALUES");
		break;
	case GNU_USER_PRINT_OPTION:
		sprintf(buf, "GNU_USER_PRINT_OPTION");
		break;
	case GNU_SET_CRASH_BLOCK:
		sprintf(buf, "GNU_SET_CRASH_BLOCK");
		break;
	case GNU_GET_FUNCTION_RANGE:
		sprintf(buf, "GNU_GET_FUNCTION_RANGE");
		break;
	case 0:
		buf[0] = NULLCHAR;
		break;
	default:
		sprintf(buf, "(?)\n");
		break;
	}

	return buf;
}

/*
 *  Restore known gdb state after a command (or failed command):
 *  re-impose crash's print options, clear the error hook, and back out
 *  of any half-finished add-symbol-file operation.
 */
void
restore_gdb_sanity(void)
{
	if (!(pc->flags & GDB_INIT))
		return;

	if (pc->output_radix) {
		*gdb_output_radix = pc->output_radix;
		*gdb_output_format = (*gdb_output_radix == 10) ? 0 : 'x';
	}

	*gdb_prettyprint_structs = 1;      /* these may piss somebody off... */
	*gdb_repeat_count_threshold = 0x7fffffff;

	error_hook = NULL;

	if (st->flags & ADD_SYMBOL_FILE) {
		error(INFO,
		    "%s\n     gdb add-symbol-file command failed\n",
			st->current->mod_namelist);
		delete_load_module(st->current->mod_base);
		st->flags &= ~ADD_SYMBOL_FILE;
	}

	if (pc->cur_gdb_cmd) {
		pc->last_gdb_cmd = pc->cur_gdb_cmd;
		pc->cur_gdb_cmd = 0;
	}
}

/*
 *  Check whether string in args[0] is a valid gdb command.
 */
int
is_gdb_command(int merge_orig_args, ulong flags)
{
	int retval;
	struct gnu_request *req;

	if (!args[0])
		return FALSE;

	/* "Q" is a crash shorthand for gdb's quit. */
	if (STREQ(args[0], "Q")) {
		args[0] = "q";
		return TRUE;
	}

	if (is_restricted_command(args[0], flags))
		return FALSE;

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->buf = GETBUF(strlen(args[0])+1);
	req->command = GNU_COMMAND_EXISTS;
	req->name = args[0];
	req->flags = GNU_RETURN_ON_ERROR;
	req->fp = pc->nullfp;	/* discard gdb's output for this probe */

	gdb_interface(req);

	if (req->flags & GNU_COMMAND_FAILED)
		retval = FALSE;
	else
		retval = req->value;

	FREEBUF(req->buf);
	FREEBUF(req);

	/* Optionally shift args[] right and prepend "gdb" as args[0]. */
	if (retval && merge_orig_args) {
		int i;
		for (i = argcnt; i; i--)
			args[i] = args[i-1];
		args[0] = "gdb";
		argcnt++;
	}

	return retval;
}

/*
 *  Check whether a command is on the gdb-prohibited list.
 */
static char *prohibited_list[] = {
	"run", "r", "break", "b", "tbreak", "hbreak", "thbreak", "rbreak",
	"watch", "rwatch", "awatch", "attach", "continue", "c", "fg",
	"detach", "finish", "handle", "interrupt", "jump", "kill", "next",
	"nexti", "signal", "step", "s", "stepi", "target", "thread", "until",
	"delete", "clear", "disable", "enable", "condition", "ignore",
	"frame", "select-frame", "f", "up", "down", "catch", "tcatch",
	"return", "file", "exec-file", "core-file", "symbol-file", "load",
	"si", "ni", "shell",
	NULL  /* must be last */
};

static char *restricted_list[] = {
	"define", "document", "while", "if",
	NULL  /* must be last */
};

#define RESTRICTED_GDB_COMMAND \
        "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command.\n"

/*
 *  Reject commands that would let gdb control a live target (prohibited)
 *  or that only make sense in a .gdbinit/command file (restricted).
 *  With RETURN_ON_ERROR, returns TRUE for a disallowed command;
 *  otherwise raises a FATAL error.
 */
static int
is_restricted_command(char *cmd, ulong flags)
{
	int i;
	char *newline;

	for (i = 0; prohibited_list[i]; i++) {
		if (STREQ(prohibited_list[i], cmd)) {
			if (flags == RETURN_ON_ERROR)
				return TRUE;
			pc->curcmd = pc->program_name;
			error(FATAL, "prohibited gdb command: %s\n", cmd);
		}
	}

	for (i = 0; restricted_list[i]; i++) {
		if (STREQ(restricted_list[i], cmd)) {
			if (flags == RETURN_ON_ERROR)
				return TRUE;
			newline = space(strlen(pc->program_name)+2);
			pc->curcmd = pc->program_name;
			error(FATAL, RESTRICTED_GDB_COMMAND, cmd,
				newline, cmd,
				newline, pc->program_name,
				newline, newline, pc->program_name);
		}
	}

	return FALSE;
}

/*
 *  Remove pipe/redirection stuff from the end of the command line.
 */
static void
strip_redirection(char *buf)
{
	char *p1, *p2;

	/* Locate the last command argument, then clip anything after it
	   that starts a pipe or redirection. */
	p1 = strstr_rightmost(buf, args[argcnt-1]);
	p2 = p1 + strlen(args[argcnt-1]);

	console("strip_redirection: [%s]\n", p2);

	if ((p1 = strpbrk(p2, "|!>")))
		*p1 = NULLCHAR;

	strip_ending_whitespace(buf);
}

/*
 *  Command for passing strings directly to gdb.
 */
void
cmd_gdb(void)
{
	char buf[BUFSIZE];
	char **argv;

	argv = STREQ(args[0], "gdb") ? &args[1] : &args[0];

	if (*argv == NULL)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (STREQ(*argv, "set") && argv[1]) {
		/*
		 *  Intercept set commands in case something has to be done
		 *  here or elsewhere.
		 */
		if (STREQ(argv[1], "gdb")) {
			cmd_set();
			return;
		}
		/* Track radix changes so crash's own setting stays in sync. */
		if (STREQ(argv[1], "output-radix") && argv[2])
			pc->output_radix = stol(argv[2], FAULT_ON_ERROR, NULL);
	}

	/*
	 *  If the command is not restricted, pass it on.
	 */
	if (!is_restricted_command(*argv, FAULT_ON_ERROR)) {
		if (STREQ(pc->command_line, "gdb")) {
			strcpy(buf, first_space(pc->orig_line));
			strip_beginning_whitespace(buf);
		} else
			strcpy(buf, pc->orig_line);

		if (pc->redirect & (REDIRECT_TO_FILE|REDIRECT_TO_PIPE))
			strip_redirection(buf);

		if (!gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR))
			error(INFO, "gdb request failed: %s\n", buf);
	}
}

/*
 *  The gdb target_xfer_memory() has a hook installed to re-route
 *  all memory accesses back here; reads of 1 or 4 bytes come primarily
 *  from text disassembly requests, and are diverted to the text cache.
 */
int
gdb_readmem_callback(ulong addr, void *buf, int len, int write)
{
	char locbuf[SIZEOF_32BIT], *p1;
	uint32_t *p2;
	int memtype;
	ulong readflags;

	if (write)
		return FALSE;	/* this session never writes target memory */

	if (pc->cur_req->flags & GNU_NO_READMEM)
		return TRUE;

	readflags = pc->curcmd_flags & PARTIAL_READ_OK ?
		RETURN_ON_ERROR|RETURN_PARTIAL : RETURN_ON_ERROR;

	/* Resolve the memory type: explicit flag, else kernel vs. user
	   virtual address (an "x/" from the gdb command implies user). */
	if (pc->curcmd_flags & MEMTYPE_UVADDR)
		memtype = UVADDR;
	else if (pc->curcmd_flags & MEMTYPE_FILEADDR)
		memtype = FILEADDR;
	else if (!IS_KVADDR(addr)) {
		if (STREQ(pc->curcmd, "gdb") &&
		    STRNEQ(pc->cur_req->buf, "x/")) {
			memtype = UVADDR;
		} else {
			if (CRASHDEBUG(1))
				console("gdb_readmem_callback: %lx %d FAILED\n",
					addr, len);
			return FALSE;
		}
	} else
		memtype = KVADDR;

	if (CRASHDEBUG(1))
		console("gdb_readmem_callback[%d]: %lx %d\n",
			memtype, addr, len);

	if (memtype == FILEADDR)
		return(readmem(pc->curcmd_private, memtype, buf, len,
			"gdb_readmem_callback", readflags));

	/* 1- and 4-byte reads go through the text value cache. */
	switch (len)
	{
	case SIZEOF_8BIT:
		p1 = (char *)buf;
		if ((memtype == KVADDR) &&
		    text_value_cache_byte(addr, (unsigned char *)p1))
			return TRUE;

		/* Read a full 32-bit word so the cache can be primed. */
		if (!readmem(addr, memtype, locbuf, SIZEOF_32BIT,
		    "gdb_readmem_callback", readflags))
			return FALSE;

		*p1 = locbuf[0];
		if (memtype == KVADDR) {
			p2 = (uint32_t *)locbuf;
			text_value_cache(addr, *p2, 0);
		}
		return TRUE;

	case SIZEOF_32BIT:
		if ((memtype == KVADDR) && text_value_cache(addr, 0, buf))
			return TRUE;

		/* NOTE(review): debug label below reads "gdb_readmem
		   callback" (no underscore) -- inconsistent with the
		   other call sites, but only a diagnostic string. */
		if (!readmem(addr, memtype, buf, SIZEOF_32BIT,
		    "gdb_readmem callback", readflags))
			return FALSE;

		if (memtype == KVADDR)
			text_value_cache(addr,
				(uint32_t)*((uint32_t *)buf), NULL);
		return TRUE;
	}

	return(readmem(addr, memtype, buf, len,
		"gdb_readmem_callback", readflags));
}

/*
 *  Machine-specific line-number pc section range verifier.
 */
int
gdb_line_number_callback(ulong pc, ulong low, ulong high)
{
	if (machdep->verify_line_number)
		return machdep->verify_line_number(pc, low, high);

	return TRUE;
}

/*
 *  Prevent gdb from trying to translate and print pointers
 *  that are not kernel virtual addresses.
 */
int
gdb_print_callback(ulong addr)
{
	if (!addr)
		return FALSE;
	else
		return IS_KVADDR(addr);
}

/*
 *  Used by gdb_interface() to catch gdb-related errors, if desired:
 *  runs gdb's cleanups and longjmps back to the setjmp in
 *  gdb_interface().
 */
void
gdb_error_hook(void)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int buffers;

	if (CRASHDEBUG(2)) {
		sprintf(buf2, "\n");

		if (CRASHDEBUG(5) && (buffers = get_embedded()))
			sprintf(buf2, "(%d buffer%s in use)\n",
				buffers, buffers > 1 ? "s" : "");

		fprintf(stderr, "%s: returned via gdb_error_hook %s",
			gdb_command_string(pc->cur_gdb_cmd, buf1, TRUE), buf2);

		console("%s: returned via gdb_error_hook %s",
			gdb_command_string(pc->cur_gdb_cmd, buf1, TRUE), buf2);
	}

#ifdef GDB_7_6
	do_cleanups(all_cleanups());
#else
	do_cleanups(NULL);
#endif

	longjmp(pc->gdb_interface_env, 1);
}

/*
 *  gdb callback to access debug mode: honors either crash's global
 *  debug level or the per-request debug level.
 */
int
gdb_CRASHDEBUG(ulong dval)
{
	if (CRASHDEBUG(dval))
		return TRUE;

	return (pc->cur_req && (pc->cur_req->debug >= dval));
}

/* Fetch the address of a named gdb user-print option variable. */
static ulong
gdb_user_print_option_address(char *name)
{
	struct gnu_request request;

	request.command = GNU_USER_PRINT_OPTION;
	request.name = name;
	gdb_command_funnel(&request);
	return request.addr;
}

/*
 *  Try to set a crash scope block based upon the vaddr, loading module
 *  debuginfo on demand when the address falls in a module.
 */
int
gdb_set_crash_scope(ulong vaddr, char *arg)
{
	struct gnu_request request, *req = &request;
	char name[BUFSIZE];
	struct load_module *lm;

	if (!is_kernel_text(vaddr)) {
		error(INFO, "invalid text address: %s\n", arg);
		return FALSE;
	}

	if (module_symbol(vaddr, NULL, &lm, name, 0)) {
		if (!(lm->mod_flags & MOD_LOAD_SYMS)) {
			error(INFO,
			    "attempting to find/load \"%s\" module debuginfo\n",
				lm->mod_name);
			if (!load_module_symbols_helper(lm->mod_name)) {
				error(INFO,
				    "cannot find/load \"%s\" module debuginfo\n",
					lm->mod_name);
				return FALSE;
			}
		}
	}

	req->command = GNU_SET_CRASH_BLOCK;
	req->addr = vaddr;
	req->flags = 0;
	req->addr2 = 0;
	gdb_command_funnel(req);

	if (CRASHDEBUG(1))
		fprintf(fp,
		    "gdb_set_crash_scope: %s addr: %lx block: %lx\n",
			req->flags & GNU_COMMAND_FAILED ? "FAILED" : "OK",
			req->addr, req->addr2);

	if (req->flags & GNU_COMMAND_FAILED) {
		error(INFO,
			"gdb cannot find text block for address: %s\n", arg);
		return FALSE;
	}

	return TRUE;
}

#ifndef ALPHA
/*
 *  Stub routine needed for resolution by non-alpha, modified gdb code.
 */
int
get_frame_offset(ulong pc)
{
	return (error(FATAL,
	    "get_frame_offset: invalid request for non-alpha systems!\n"));
}
#endif /* !ALPHA */
crash-7.1.4/gdb-7.6.patch0000664000000000000000000015741012634305150013417 0ustar rootroot--- gdb-7.6/libiberty/Makefile.in.orig +++ gdb-7.6/libiberty/Makefile.in @@ -175,6 +175,7 @@ REQUIRED_OFILES = \ ./getruntime.$(objext) ./hashtab.$(objext) ./hex.$(objext) \ ./lbasename.$(objext) ./lrealpath.$(objext) \ ./make-relative-prefix.$(objext) ./make-temp-file.$(objext) \ + ./mkstemps.$(objext) \ ./objalloc.$(objext) \ ./obstack.$(objext) \ ./partition.$(objext) ./pexecute.$(objext) ./physmem.$(objext) \ @@ -206,7 +207,7 @@ CONFIGURED_OFILES = ./asprintf.$(objext) ./index.$(objext) ./insque.$(objext) \ ./memchr.$(objext) ./memcmp.$(objext) ./memcpy.$(objext) \ ./memmem.$(objext) ./memmove.$(objext) \ - ./mempcpy.$(objext) ./memset.$(objext) ./mkstemps.$(objext) \ + ./mempcpy.$(objext) ./memset.$(objext) \ ./pex-djgpp.$(objext) ./pex-msdos.$(objext) \ ./pex-unix.$(objext) ./pex-win32.$(objext) \ ./putenv.$(objext) \ --- gdb-7.6/opcodes/i386-dis.c.orig +++ gdb-7.6/opcodes/i386-dis.c @@ -11510,6 +11510,10 @@ print_insn (bfd_vma pc, disassemble_info threebyte = *++codep; dp = &dis386_twobyte[threebyte]; need_modrm = twobyte_has_modrm[*codep]; + if (dp->name && ((strcmp(dp->name, "ud2a") == 0) || (strcmp(dp->name, "ud2") == 0))) { + extern int kernel_BUG_encoding_bytes(void); + codep += kernel_BUG_encoding_bytes(); + } codep++; } else --- gdb-7.6/gdb/dwarf2read.c.orig +++ gdb-7.6/gdb/dwarf2read.c @@ -2670,7 +2670,11 @@ read_index_from_section (struct objfile indices.
*/ if (version < 4) { +#ifdef CRASH_MERGE + static int warning_printed = 1; +#else static int warning_printed = 0; +#endif if (!warning_printed) { warning (_("Skipping obsolete .gdb_index section in %s."), @@ -2689,7 +2693,11 @@ read_index_from_section (struct objfile "set use-deprecated-index-sections on". */ if (version < 6 && !deprecated_ok) { +#ifdef CRASH_MERGE + static int warning_printed = 1; +#else static int warning_printed = 0; +#endif if (!warning_printed) { warning (_("\ --- gdb-7.6/gdb/amd64-linux-nat.c.orig +++ gdb-7.6/gdb/amd64-linux-nat.c @@ -45,6 +45,17 @@ /* ezannoni-2003-07-09: I think this is fixed. The extraneous defs have been removed from ptrace.h in the kernel. However, better safe than sorry. */ +#ifdef CRASH_MERGE +/* + * When compiling within a 2.6.25-based Fedora build environment with + * gcc 4.3, four new "typedef unsigned int u32;" declarations were + * required due to a new ptrace_bts_config structure declaration in + * "asm-x86/ptrace-abi.h" that used u32 members, but u32 is defined in + * "asm-x86/types.h" within a __KERNEL__ section. They've been changed + * to __u32, but this patch remains for building in that environment. + */ +typedef unsigned int u32; +#endif #include #include #include "gdb_proc_service.h" --- gdb-7.6/gdb/symfile.c.orig +++ gdb-7.6/gdb/symfile.c @@ -693,7 +693,26 @@ default_symfile_offsets (struct objfile for (cur_sec = abfd->sections; cur_sec != NULL; cur_sec = cur_sec->next) /* We do not expect this to happen; just skip this step if the relocatable file has a section with an assigned VMA. */ - if (bfd_section_vma (abfd, cur_sec) != 0) + if (bfd_section_vma (abfd, cur_sec) != 0 + /* + * Kernel modules may have some non-zero VMAs, i.e., like the + * __ksymtab and __ksymtab_gpl sections in this example: + * + * Section Headers: + * [Nr] Name Type Address Offset + * Size EntSize Flags Link Info Align + * ... 
+ * [ 8] __ksymtab PROGBITS 0000000000000060 0000ad90 + * 0000000000000010 0000000000000000 A 0 0 16 + * [ 9] .rela__ksymtab RELA 0000000000000000 0000ada0 + * 0000000000000030 0000000000000018 43 8 8 + * [10] __ksymtab_gpl PROGBITS 0000000000000070 0000add0 + * 00000000000001a0 0000000000000000 A 0 0 16 + * ... + * + * but they should be treated as if they are NULL. + */ + && strncmp (bfd_get_section_name (abfd, cur_sec), "__k", 3) != 0) break; if (cur_sec == NULL) @@ -1122,6 +1141,12 @@ symbol_file_add_with_addrs_or_offsets (b error (_("Not confirmed.")); objfile = allocate_objfile (abfd, flags | (mainline ? OBJF_MAINLINE : 0)); +#ifdef CRASH_MERGE + if (add_flags & SYMFILE_MAINLINE) { + extern struct objfile *gdb_kernel_objfile; + gdb_kernel_objfile = objfile; + } +#endif if (parent) add_separate_debug_objfile (objfile, parent); @@ -1484,6 +1509,9 @@ find_separate_debug_file (const char *di VEC (char_ptr) *debugdir_vec; struct cleanup *back_to; int ix; +#ifdef CRASH_MERGE + extern int check_specified_module_tree(char *, char *); +#endif /* Set I to max (strlen (canon_dir), strlen (dir)). */ i = strlen (dir); @@ -1513,6 +1541,15 @@ find_separate_debug_file (const char *di if (separate_debug_file_exists (debugfile, crc32, objfile)) return debugfile; +#ifdef CRASH_MERGE +{ + if (check_specified_module_tree(objfile->name, debugfile) && + separate_debug_file_exists(debugfile, crc32, objfile)) { + return debugfile; + } +} +#endif + /* Then try in the global debugfile directories. 
Keep backward compatibility so that DEBUG_FILE_DIRECTORY being "" will @@ -1583,6 +1620,10 @@ find_separate_debug_file_by_debuglink (s char *debugfile; unsigned long crc32; struct cleanup *cleanups; +#ifdef CRASH_MERGE + char *name_copy; + extern char *check_specified_kernel_debug_file(); +#endif debuglink = get_debug_link_info (objfile, &crc32); @@ -1635,6 +1676,12 @@ find_separate_debug_file_by_debuglink (s } do_cleanups (cleanups); +#ifdef CRASH_MERGE + if (debugfile == NULL) { + name_copy = check_specified_kernel_debug_file(); + return (name_copy ? xstrdup(name_copy) : NULL); + } +#endif return debugfile; } @@ -2409,8 +2456,10 @@ add_symbol_file_command (char *args, int so we can't determine what section names are valid. */ } +#ifndef CRASH_MERGE if (from_tty && (!query ("%s", ""))) error (_("Not confirmed.")); +#endif symbol_file_add (filename, from_tty ? SYMFILE_VERBOSE : 0, section_addrs, flags); @@ -3690,6 +3739,15 @@ bfd_byte * symfile_relocate_debug_section (struct objfile *objfile, asection *sectp, bfd_byte *buf) { +#ifdef CRASH_MERGE + /* Executable files have all the relocations already resolved. + * Handle files linked with --emit-relocs. + * http://sources.redhat.com/ml/gdb/2006-08/msg00137.html + */ + bfd *abfd = objfile->obfd; + if ((abfd->flags & EXEC_P) != 0) + return NULL; +#endif gdb_assert (objfile->sf->sym_relocate); return (*objfile->sf->sym_relocate) (objfile, sectp, buf); --- gdb-7.6/gdb/cli/cli-cmds.c.orig +++ gdb-7.6/gdb/cli/cli-cmds.c @@ -466,6 +466,10 @@ show_script_ext_mode (struct ui_file *fi If SEARCH_PATH is non-zero, and the file isn't found in cwd, search for it in the source search path. 
*/ +#ifdef CRASH_MERGE +static int crash_from_tty = 0; +#endif + int find_and_open_script (const char *script_file, int search_path, FILE **streamp, char **full_pathp) @@ -508,6 +512,32 @@ find_and_open_script (const char *script return 0; } +#ifdef CRASH_MERGE + /* + * Only allow trusted versions of .gdbinit files to be + * sourced during session initialization. + */ + if (crash_from_tty == -1) + { + struct stat statbuf; + FILE *stream = *streamp; + int fd = fileno (stream); + if (fstat (fd, &statbuf) < 0) + { + perror_with_name (*full_pathp); + fclose (stream); + return 0; + } + if (statbuf.st_uid != getuid () || (statbuf.st_mode & S_IWOTH)) + { + extern void untrusted_file(FILE *, char *); + untrusted_file(NULL, *full_pathp); + fclose (stream); + return 0; + } + } +#endif + return 1; } @@ -566,7 +596,11 @@ source_script_with_search (const char *f If the source command was invoked interactively, throw an error. Otherwise (e.g. if it was invoked by a script), silently ignore the error. */ +#ifdef CRASH_MERGE + if (from_tty > 0) +#else if (from_tty) +#endif perror_with_name (file); else return; @@ -589,7 +623,14 @@ source_script_with_search (const char *f void source_script (char *file, int from_tty) { +#ifdef CRASH_MERGE + crash_from_tty = from_tty; +#endif source_script_with_search (file, from_tty, 0); +#ifdef CRASH_MERGE + crash_from_tty = 0; +#endif + } /* Return the source_verbose global variable to its previous state --- gdb-7.6/gdb/psymtab.c.orig +++ gdb-7.6/gdb/psymtab.c @@ -305,10 +305,14 @@ find_pc_sect_psymtab (struct objfile *ob struct minimal_symbol *msymbol) { struct partial_symtab *pst; +#ifdef CRASH_MERGE + extern int gdb_line_number_callback(unsigned long, unsigned long, unsigned long); +#endif /* Try just the PSYMTABS_ADDRMAP mapping first as it has better granularity than the later used TEXTLOW/TEXTHIGH one. 
*/ +#ifndef __i386__ if (objfile->psymtabs_addrmap != NULL) { pst = addrmap_find (objfile->psymtabs_addrmap, pc); @@ -343,6 +347,7 @@ find_pc_sect_psymtab (struct objfile *ob } next: +#endif /* Existing PSYMTABS_ADDRMAP mapping is present even for PARTIAL_SYMTABs which still have no corresponding full SYMTABs read. But it is not @@ -361,7 +366,12 @@ find_pc_sect_psymtab (struct objfile *ob best_pst = find_pc_sect_psymtab_closer (objfile, pc, section, pst, msymbol); +#ifdef CRASH_MERGE + if ((best_pst != NULL) && + gdb_line_number_callback(pc, pst->textlow, pst->texthigh)) +#else if (best_pst != NULL) +#endif return best_pst; } --- gdb-7.6/gdb/symtab.c.orig +++ gdb-7.6/gdb/symtab.c @@ -1198,7 +1198,9 @@ demangle_for_lookup (const char *name, e doesn't affect these calls since they are looking for a known variable and thus can probably assume it will never hit the C++ code). */ - +#ifdef CRASH_MERGE +static void gdb_bait_and_switch(char *, struct symbol *); +#endif struct symbol * lookup_symbol_in_language (const char *name, const struct block *block, const domain_enum domain, enum language lang, @@ -1212,17 +1214,30 @@ lookup_symbol_in_language (const char *n is_a_field_of_this); do_cleanups (cleanup); +#ifdef CRASH_MERGE + if (returnval && (domain == VAR_DOMAIN)) + gdb_bait_and_switch((char *)modified_name, returnval); +#endif + return returnval; } /* Behave like lookup_symbol_in_language, but performed with the current language. 
*/ +#ifdef CRASH_MERGE +static struct block *gdb_get_crash_block(void); +#endif + struct symbol * lookup_symbol (const char *name, const struct block *block, domain_enum domain, struct field_of_this_result *is_a_field_of_this) { +#ifdef CRASH_MERGE + if (!block) + block = gdb_get_crash_block(); +#endif return lookup_symbol_in_language (name, block, domain, current_language->la_language, is_a_field_of_this); @@ -5100,3 +5115,662 @@ When enabled, debugging messages are pri observer_attach_executable_changed (symtab_observer_executable_changed); } + +#ifdef CRASH_MERGE +#include "gdb-stabs.h" +#include "version.h" +#define GDB_COMMON +#include "../../defs.h" + +static void get_member_data(struct gnu_request *, struct type *); +static void dump_enum(struct type *, struct gnu_request *); +static void eval_enum(struct type *, struct gnu_request *); +static void gdb_get_line_number(struct gnu_request *); +static void gdb_get_datatype(struct gnu_request *); +static void gdb_get_symbol_type(struct gnu_request *); +static void gdb_command_exists(struct gnu_request *); +static void gdb_debug_command(struct gnu_request *); +static void gdb_function_numargs(struct gnu_request *); +static void gdb_add_symbol_file(struct gnu_request *); +static void gdb_delete_symbol_file(struct gnu_request *); +static void gdb_patch_symbol_values(struct gnu_request *); +extern void replace_ui_file_FILE(struct ui_file *, FILE *); +static void get_user_print_option_address(struct gnu_request *); +extern int get_frame_offset(CORE_ADDR); +static void gdb_set_crash_block(struct gnu_request *); +void gdb_command_funnel(struct gnu_request *); + +struct objfile *gdb_kernel_objfile = { 0 }; + +static ulong gdb_merge_flags = 0; +#define KERNEL_SYMBOLS_PATCHED (0x1) + +#undef STREQ +#define STREQ(A, B) (A && B && (strcmp(A, B) == 0)) + +/* + * All commands from above come through here. 
+ */ +void +gdb_command_funnel(struct gnu_request *req) +{ + struct symbol *sym; + + if (req->command != GNU_VERSION) { + replace_ui_file_FILE(gdb_stdout, req->fp); + replace_ui_file_FILE(gdb_stderr, req->fp); + do_cleanups(all_cleanups()); + } + + switch (req->command) + { + case GNU_VERSION: + req->buf = (char *)version; + break; + + case GNU_PASS_THROUGH: + execute_command(req->buf, + req->flags & GNU_FROM_TTY_OFF ? FALSE : TRUE); + break; + + case GNU_USER_PRINT_OPTION: + get_user_print_option_address(req); + break; + + case GNU_RESOLVE_TEXT_ADDR: + sym = find_pc_function(req->addr); + if (!sym || TYPE_CODE(sym->type) != TYPE_CODE_FUNC) + req->flags |= GNU_COMMAND_FAILED; + break; + + case GNU_DISASSEMBLE: + if (req->addr2) + sprintf(req->buf, "disassemble 0x%lx 0x%lx", + req->addr, req->addr2); + else + sprintf(req->buf, "disassemble 0x%lx", req->addr); + execute_command(req->buf, TRUE); + break; + + case GNU_ADD_SYMBOL_FILE: + gdb_add_symbol_file(req); + break; + + case GNU_DELETE_SYMBOL_FILE: + gdb_delete_symbol_file(req); + break; + + case GNU_GET_LINE_NUMBER: + gdb_get_line_number(req); + break; + + case GNU_GET_DATATYPE: + gdb_get_datatype(req); + break; + + case GNU_GET_SYMBOL_TYPE: + gdb_get_symbol_type(req); + break; + + case GNU_COMMAND_EXISTS: + gdb_command_exists(req); + break; + + case GNU_ALPHA_FRAME_OFFSET: + req->value = 0; + break; + + case GNU_FUNCTION_NUMARGS: + gdb_function_numargs(req); + break; + + case GNU_DEBUG_COMMAND: + gdb_debug_command(req); + break; + + case GNU_PATCH_SYMBOL_VALUES: + gdb_patch_symbol_values(req); + break; + + case GNU_SET_CRASH_BLOCK: + gdb_set_crash_block(req); + break; + + default: + req->flags |= GNU_COMMAND_FAILED; + break; + } +} + +/* + * Given a PC value, return the file and line number. + */ +static void +gdb_get_line_number(struct gnu_request *req) +{ + struct symtab_and_line sal; + struct symbol *sym; + CORE_ADDR pc; + +#define LASTCHAR(s) (s[strlen(s)-1]) + + /* + * Prime the addrmap pump. 
+ */ + if (req->name) + sym = lookup_symbol(req->name, 0, VAR_DOMAIN, 0); + + pc = req->addr; + + sal = find_pc_line(pc, 0); + + if (!sal.symtab) { + req->buf[0] = '\0'; + return; + } + + if (sal.symtab->filename && sal.symtab->dirname) { + if (sal.symtab->filename[0] == '/') + sprintf(req->buf, "%s: %d", + sal.symtab->filename, sal.line); + else + sprintf(req->buf, "%s%s%s: %d", + sal.symtab->dirname, + LASTCHAR(sal.symtab->dirname) == '/' ? "" : "/", + sal.symtab->filename, sal.line); + } +} + + +/* + * General purpose routine for determining datatypes. + */ + +static void +gdb_get_datatype(struct gnu_request *req) +{ + register struct cleanup *old_chain = NULL; + register struct type *type; + register struct type *typedef_type; + struct expression *expr; + struct symbol *sym; + register int i; + struct field *nextfield; + struct value *val; + + if (gdb_CRASHDEBUG(2)) + console("gdb_get_datatype [%s] (a)\n", req->name); + + req->typecode = TYPE_CODE_UNDEF; + + /* + * lookup_symbol() will pick up struct and union names. + */ + sym = lookup_symbol(req->name, 0, STRUCT_DOMAIN, 0); + if (sym) { + req->typecode = TYPE_CODE(sym->type); + req->length = TYPE_LENGTH(sym->type); + if (req->member) + get_member_data(req, sym->type); + + if (TYPE_CODE(sym->type) == TYPE_CODE_ENUM) { + if (req->flags & GNU_PRINT_ENUMERATORS) + dump_enum(sym->type, req); + } + + return; + } + + /* + * Otherwise parse the expression. 
+ */ + if (gdb_CRASHDEBUG(2)) + console("gdb_get_datatype [%s] (b)\n", req->name); + + expr = parse_expression(req->name); + + old_chain = make_cleanup(free_current_contents, &expr); + + + switch (expr->elts[0].opcode) + { + case OP_VAR_VALUE: + if (gdb_CRASHDEBUG(2)) + console("expr->elts[0].opcode: OP_VAR_VALUE\n"); + type = expr->elts[2].symbol->type; + if (req->flags & GNU_VAR_LENGTH_TYPECODE) { + req->typecode = TYPE_CODE(type); + req->length = TYPE_LENGTH(type); + } + if (TYPE_CODE(type) == TYPE_CODE_ENUM) { + req->typecode = TYPE_CODE(type); + req->value = SYMBOL_VALUE(expr->elts[2].symbol); + req->tagname = (char *)TYPE_TAG_NAME(type); + if (!req->tagname) { + val = evaluate_type(expr); + eval_enum(value_type(val), req); + } + } + break; + + case OP_TYPE: + if (gdb_CRASHDEBUG(2)) + console("expr->elts[0].opcode: OP_TYPE\n"); + type = expr->elts[1].type; + + req->typecode = TYPE_CODE(type); + req->length = TYPE_LENGTH(type); + + if (TYPE_CODE(type) == TYPE_CODE_TYPEDEF) { + req->is_typedef = TYPE_CODE_TYPEDEF; + if ((typedef_type = check_typedef(type))) { + req->typecode = TYPE_CODE(typedef_type); + req->length = TYPE_LENGTH(typedef_type); + type = typedef_type; + } + } + + if (TYPE_CODE(type) == TYPE_CODE_ENUM) { + if (req->is_typedef) + if (req->flags & GNU_PRINT_ENUMERATORS) { + if (req->is_typedef) + fprintf_filtered(gdb_stdout, + "typedef "); + dump_enum(type, req); + } + } + + if (req->member) + get_member_data(req, type); + + break; + + default: + if (gdb_CRASHDEBUG(2)) + console("expr->elts[0].opcode: %d (?)\n", + expr->elts[0].opcode); + break; + + } + + do_cleanups(old_chain); +} + +/* + * More robust enum list dump that gdb's, showing the value of each + * identifier, each on its own line. 
+ */ +static void +dump_enum(struct type *type, struct gnu_request *req) +{ + register int i; + int len; + int lastval; + + len = TYPE_NFIELDS (type); + lastval = 0; + if (TYPE_TAG_NAME(type)) + fprintf_filtered(gdb_stdout, + "enum %s {\n", TYPE_TAG_NAME (type)); + else + fprintf_filtered(gdb_stdout, "enum {\n"); + + for (i = 0; i < len; i++) { + fprintf_filtered(gdb_stdout, " %s", + TYPE_FIELD_NAME (type, i)); + if (lastval != TYPE_FIELD_BITPOS (type, i)) { + fprintf_filtered (gdb_stdout, " = %d", + TYPE_FIELD_BITPOS (type, i)); + lastval = TYPE_FIELD_BITPOS (type, i); + } else + fprintf_filtered(gdb_stdout, " = %d", lastval); + fprintf_filtered(gdb_stdout, "\n"); + lastval++; + } + if (TYPE_TAG_NAME(type)) + fprintf_filtered(gdb_stdout, "};\n"); + else + fprintf_filtered(gdb_stdout, "} %s;\n", req->name); +} + +/* + * Given an enum type with no tagname, determine its value. + */ +static void +eval_enum(struct type *type, struct gnu_request *req) +{ + register int i; + int len; + int lastval; + + len = TYPE_NFIELDS (type); + lastval = 0; + + for (i = 0; i < len; i++) { + if (lastval != TYPE_FIELD_BITPOS (type, i)) { + lastval = TYPE_FIELD_BITPOS (type, i); + } + if (STREQ(TYPE_FIELD_NAME(type, i), req->name)) { + req->tagname = "(unknown)"; + req->value = lastval; + return; + } + lastval++; + } +} + +/* + * Walk through a struct type's list of fields looking for the desired + * member field, and when found, return its relevant data. 
+ */ +static void +get_member_data(struct gnu_request *req, struct type *type) +{ + register short i; + struct field *nextfield; + short nfields; + struct type *typedef_type; + + req->member_offset = -1; + + nfields = TYPE_MAIN_TYPE(type)->nfields; + nextfield = TYPE_MAIN_TYPE(type)->flds_bnds.fields; + + if (nfields == 0) { + struct type *newtype; + newtype = lookup_transparent_type(req->name); + if (newtype) { + console("get_member_data(%s.%s): switching type from %lx to %lx\n", + req->name, req->member, type, newtype); + nfields = TYPE_MAIN_TYPE(newtype)->nfields; + nextfield = TYPE_MAIN_TYPE(newtype)->flds_bnds.fields; + } + } + + for (i = 0; i < nfields; i++) { + if (STREQ(req->member, nextfield->name)) { + req->member_offset = nextfield->loc.bitpos; + req->member_length = TYPE_LENGTH(nextfield->type); + req->member_typecode = TYPE_CODE(nextfield->type); + if ((req->member_typecode == TYPE_CODE_TYPEDEF) && + (typedef_type = check_typedef(nextfield->type))) + req->member_length = TYPE_LENGTH(typedef_type); + return; + } + nextfield++; + } +} + +/* + * Check whether a command exists. If it doesn't, the command will be + * returned indirectly via the error_hook. 
+ */ +static void +gdb_command_exists(struct gnu_request *req) +{ + extern struct cmd_list_element *cmdlist; + register struct cmd_list_element *c; + + req->value = FALSE; + c = lookup_cmd(&req->name, cmdlist, "", 0, 1); + req->value = TRUE; +} + +static void +gdb_function_numargs(struct gnu_request *req) +{ + struct symbol *sym; + + sym = find_pc_function(req->pc); + + if (!sym || TYPE_CODE(sym->type) != TYPE_CODE_FUNC) { + req->flags |= GNU_COMMAND_FAILED; + return; + } + + req->value = (ulong)TYPE_NFIELDS(sym->type); +} + +struct load_module *gdb_current_load_module = NULL; + +static void +gdb_add_symbol_file(struct gnu_request *req) +{ + register struct objfile *loaded_objfile = NULL; + register struct objfile *objfile; + register struct minimal_symbol *m; + struct load_module *lm; + int external, subsequent, found; + off_t offset; + ulong value, adjusted; + struct symbol *sym; + struct expression *expr; + struct cleanup *old_chain; + int i; + int allsect = 0; + char *secname; + char buf[80]; + + gdb_current_load_module = lm = (struct load_module *)req->addr; + + req->name = lm->mod_namelist; + gdb_delete_symbol_file(req); + + if ((lm->mod_flags & MOD_NOPATCH) == 0) { + for (i = 0 ; i < lm->mod_sections; i++) { + if (STREQ(lm->mod_section_data[i].name, ".text") && + (lm->mod_section_data[i].flags & SEC_FOUND)) + allsect = 1; + } + + if (!allsect) { + sprintf(req->buf, "add-symbol-file %s 0x%lx %s", lm->mod_namelist, + lm->mod_text_start ? lm->mod_text_start : lm->mod_base, + lm->mod_flags & MOD_DO_READNOW ? 
"-readnow" : ""); + if (lm->mod_data_start) { + sprintf(buf, " -s .data 0x%lx", lm->mod_data_start); + strcat(req->buf, buf); + } + if (lm->mod_bss_start) { + sprintf(buf, " -s .bss 0x%lx", lm->mod_bss_start); + strcat(req->buf, buf); + } + if (lm->mod_rodata_start) { + sprintf(buf, " -s .rodata 0x%lx", lm->mod_rodata_start); + strcat(req->buf, buf); + } + } else { + sprintf(req->buf, "add-symbol-file %s 0x%lx %s", lm->mod_namelist, + lm->mod_text_start, lm->mod_flags & MOD_DO_READNOW ? + "-readnow" : ""); + for (i = 0; i < lm->mod_sections; i++) { + secname = lm->mod_section_data[i].name; + if ((lm->mod_section_data[i].flags & SEC_FOUND) && + !STREQ(secname, ".text")) { + sprintf(buf, " -s %s 0x%lx", secname, + lm->mod_section_data[i].offset + lm->mod_base); + strcat(req->buf, buf); + } + } + } + } + + if (gdb_CRASHDEBUG(1)) + fprintf_filtered(gdb_stdout, "%s\n", req->buf); + + execute_command(req->buf, FALSE); + + ALL_OBJFILES(objfile) { + if (same_file(objfile->name, lm->mod_namelist)) { + loaded_objfile = objfile; + break; + } + } + + if (!loaded_objfile) + req->flags |= GNU_COMMAND_FAILED; +} + +static void +gdb_delete_symbol_file(struct gnu_request *req) +{ + register struct objfile *objfile; + + ALL_OBJFILES(objfile) { + if (STREQ(objfile->name, req->name) || + same_file(objfile->name, req->name)) { + free_objfile(objfile); + break; + } + } + + if (gdb_CRASHDEBUG(2)) { + fprintf_filtered(gdb_stdout, "current object files:\n"); + ALL_OBJFILES(objfile) + fprintf_filtered(gdb_stdout, " %s\n", objfile->name); + } +} + +/* + * Walk through all minimal_symbols, patching their values with the + * correct addresses. 
+ */ +static void +gdb_patch_symbol_values(struct gnu_request *req) +{ + struct minimal_symbol *msymbol; + struct objfile *objfile; + + req->name = PATCH_KERNEL_SYMBOLS_START; + patch_kernel_symbol(req); + + ALL_MSYMBOLS (objfile, msymbol) + { + req->name = (char *)msymbol->ginfo.name; + req->addr = (ulong)(&SYMBOL_VALUE_ADDRESS(msymbol)); + if (!patch_kernel_symbol(req)) { + req->flags |= GNU_COMMAND_FAILED; + break; + } + } + + req->name = PATCH_KERNEL_SYMBOLS_STOP; + patch_kernel_symbol(req); + + clear_symtab_users(0); + gdb_merge_flags |= KERNEL_SYMBOLS_PATCHED; +} + +static void +gdb_get_symbol_type(struct gnu_request *req) +{ + struct expression *expr; + struct value *val; + struct cleanup *old_chain = NULL; + struct type *type; + struct type *target_type; + + req->typecode = TYPE_CODE_UNDEF; + + expr = parse_expression (req->name); + old_chain = make_cleanup (free_current_contents, &expr); + val = evaluate_type (expr); + + type = value_type(val); + + req->type_name = (char *)TYPE_MAIN_TYPE(type)->name; + req->typecode = TYPE_MAIN_TYPE(type)->code; + req->length = type->length; + target_type = TYPE_MAIN_TYPE(type)->target_type; + + if (target_type) { + req->target_typename = (char *)TYPE_MAIN_TYPE(target_type)->name; + req->target_typecode = TYPE_MAIN_TYPE(target_type)->code; + req->target_length = target_type->length; + } + + if (req->member) + get_member_data(req, type); + + do_cleanups (old_chain); +} + +static void +gdb_debug_command(struct gnu_request *req) +{ + +} + +/* + * Only necessary on "patched" kernel symbol sessions, and called only by + * lookup_symbol(), pull a symbol value bait-and-switch operation by altering + * either a data symbol's address value or a text symbol's block start address. 
+ */ +static void +gdb_bait_and_switch(char *name, struct symbol *sym) +{ + struct minimal_symbol *msym; + struct block *block; + + if ((gdb_merge_flags & KERNEL_SYMBOLS_PATCHED) && + (msym = lookup_minimal_symbol(name, NULL, gdb_kernel_objfile))) { + if (sym->aclass == LOC_BLOCK) { + block = (struct block *)SYMBOL_BLOCK_VALUE(sym); + BLOCK_START(block) = SYMBOL_VALUE_ADDRESS(msym); + } else + SYMBOL_VALUE_ADDRESS(sym) = SYMBOL_VALUE_ADDRESS(msym); + } +} + +#include "valprint.h" + +void +get_user_print_option_address(struct gnu_request *req) +{ + extern struct value_print_options user_print_options; + + req->addr = 0; + + if (strcmp(req->name, "output_format") == 0) + req->addr = (ulong)&user_print_options.output_format; + if (strcmp(req->name, "print_max") == 0) + req->addr = (ulong)&user_print_options.print_max; + if (strcmp(req->name, "prettyprint_structs") == 0) + req->addr = (ulong)&user_print_options.prettyprint_structs; + if (strcmp(req->name, "prettyprint_arrays") == 0) + req->addr = (ulong)&user_print_options.prettyprint_arrays; + if (strcmp(req->name, "repeat_count_threshold") == 0) + req->addr = (ulong)&user_print_options.repeat_count_threshold; + if (strcmp(req->name, "stop_print_at_null") == 0) + req->addr = (ulong)&user_print_options.stop_print_at_null; + if (strcmp(req->name, "output_radix") == 0) + req->addr = (ulong)&output_radix; +} + +CORE_ADDR crash_text_scope; + +static void +gdb_set_crash_block(struct gnu_request *req) +{ + if (!req->addr) { /* debug */ + crash_text_scope = 0; + return; + } + + if ((req->addr2 = (ulong)block_for_pc(req->addr))) + crash_text_scope = req->addr; + else { + crash_text_scope = 0; + req->flags |= GNU_COMMAND_FAILED; + } +} + +static struct block * +gdb_get_crash_block(void) +{ + if (crash_text_scope) + return block_for_pc(crash_text_scope); + else + return NULL; +} +#endif --- gdb-7.6/gdb/c-typeprint.c.orig +++ gdb-7.6/gdb/c-typeprint.c @@ -1097,7 +1097,8 @@ c_type_print_base (struct type *type, st fprintf_filtered 
(stream, "static "); c_print_type (TYPE_FIELD_TYPE (type, i), TYPE_FIELD_NAME (type, i), - stream, show - 1, level + 4, + stream, strlen(TYPE_FIELD_NAME (type, i)) ? + show - 1 : show, level + 4, &local_flags); if (!field_is_static (&TYPE_FIELD (type, i)) && TYPE_FIELD_PACKED (type, i)) --- gdb-7.6/gdb/xml-syscall.c.orig +++ gdb-7.6/gdb/xml-syscall.c @@ -38,7 +38,11 @@ static void syscall_warn_user (void) { +#ifdef CRASH_MERGE + static int have_warned = 1; +#else static int have_warned = 0; +#endif if (!have_warned) { have_warned = 1; --- gdb-7.6/gdb/exceptions.c.orig +++ gdb-7.6/gdb/exceptions.c @@ -218,6 +218,10 @@ exceptions_state_mc_action_iter_1 (void) /* Return EXCEPTION to the nearest containing catch_errors(). */ +#ifdef CRASH_MERGE +void (*error_hook) (void) ATTRIBUTE_NORETURN; +#endif + void throw_exception (struct gdb_exception exception) { @@ -225,6 +229,13 @@ throw_exception (struct gdb_exception ex immediate_quit = 0; do_cleanups (all_cleanups ()); +#ifdef CRASH_MERGE + if (error_hook) { + fprintf_filtered(gdb_stderr, "%s\n", exception.message); + (*error_hook)(); + } else + fprintf_filtered(gdb_stderr, "gdb called without error_hook: %s\n", exception.message); +#endif /* Jump to the containing catch_errors() call, communicating REASON to that call via setjmp's return value. 
Note that REASON can't --- gdb-7.6/gdb/valprint.h.orig +++ gdb-7.6/gdb/valprint.h @@ -152,11 +152,17 @@ extern void print_function_pointer_addre struct gdbarch *gdbarch, CORE_ADDR address, struct ui_file *stream); - +#ifdef CRASH_MERGE +extern int valprint_read_string (CORE_ADDR addr, int len, int width, + unsigned int fetchlimit, + enum bfd_endian byte_order, gdb_byte **buffer, + int *bytes_read); +#else extern int read_string (CORE_ADDR addr, int len, int width, unsigned int fetchlimit, enum bfd_endian byte_order, gdb_byte **buffer, int *bytes_read); +#endif extern void val_print_optimized_out (struct ui_file *stream); --- gdb-7.6/gdb/target.c.orig +++ gdb-7.6/gdb/target.c @@ -1779,6 +1779,13 @@ target_xfer_partial (struct target_ops * int target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len) { +#ifdef CRASH_MERGE + extern int gdb_readmem_callback(unsigned long, void *, int, int); + if (gdb_readmem_callback(memaddr, (void *)myaddr, len, 0)) + return 0; + else + return EIO; +#endif /* Dispatch to the topmost target, not the flattened current_target. Memory accesses check target->to_has_(all_)memory, and the flattened target doesn't inherit those. */ @@ -1814,6 +1821,13 @@ target_read_stack (CORE_ADDR memaddr, gd int target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len) { +#ifdef CRASH_MERGE + extern int gdb_readmem_callback(unsigned long, void *, int, int); + if (gdb_readmem_callback(memaddr, (void *)myaddr, len, 1)) + return 0; + else + return EIO; +#endif /* Dispatch to the topmost target, not the flattened current_target. Memory accesses check target->to_has_(all_)memory, and the flattened target doesn't inherit those. 
*/ --- gdb-7.6/gdb/printcmd.c.orig +++ gdb-7.6/gdb/printcmd.c @@ -1001,11 +1001,62 @@ print_command_1 (char *exp, int voidprin } static void +print_command_2 (char *exp, int inspect, int voidprint) +{ + struct expression *expr; + struct cleanup *old_chain = 0; + char format = 0; + struct value *val; + struct format_data fmt; + int cleanup = 0; + + if (exp && *exp == '/') + { + exp++; + fmt = decode_format (&exp, last_format, 0); + validate_format (fmt, "print"); + last_format = format = fmt.format; + } + else + { + fmt.count = 1; + fmt.format = 0; + fmt.size = 0; + fmt.raw = 0; + } + + if (exp && *exp) + { + expr = parse_expression (exp); + old_chain = make_cleanup (free_current_contents, &expr); + cleanup = 1; + val = evaluate_expression (expr); + } + else + val = access_value_history (0); + + printf_filtered ("%d %d %d %d %d %d\n", + TYPE_CODE (check_typedef(value_type (val))), + TYPE_UNSIGNED (check_typedef(value_type (val))), + TYPE_LENGTH (check_typedef(value_type(val))), + value_offset (val), value_bitpos (val), value_bitsize(val)); + + if (cleanup) + do_cleanups (old_chain); +} + +static void print_command (char *exp, int from_tty) { print_command_1 (exp, 1); } +static void +printm_command (char *exp, int from_tty) +{ + print_command_2 (exp, 0, 1); +} + /* Same as print, except it doesn't print void results. 
*/ static void call_command (char *exp, int from_tty) @@ -2593,6 +2644,12 @@ EXP may be preceded with /FMT, where FMT but no count or size letter (see \"x\" command).")); set_cmd_completer (c, expression_completer); add_com_alias ("p", "print", class_vars, 1); + + c = add_com ("printm", class_vars, printm_command, _("\ +Similar to \"print\" command, but it used to print the type, size, offset,\n\ +bitpos and bitsize of the expression EXP.")); + set_cmd_completer (c, expression_completer); + add_com_alias ("inspect", "print", class_vars, 1); add_setshow_uinteger_cmd ("max-symbolic-offset", no_class, --- gdb-7.6/gdb/ui-file.c.orig +++ gdb-7.6/gdb/ui-file.c @@ -671,6 +671,17 @@ gdb_fopen (char *name, char *mode) return stdio_file_new (f, 1); } +#ifdef CRASH_MERGE +void +replace_ui_file_FILE(struct ui_file *file, FILE *fp) +{ + struct stdio_file *stdio_file; + + stdio_file = (struct stdio_file *)ui_file_data(file); + stdio_file->file = fp; +} +#endif + /* ``struct ui_file'' implementation that maps onto two ui-file objects. */ static ui_file_write_ftype tee_file_write; --- gdb-7.6/gdb/main.c.orig +++ gdb-7.6/gdb/main.c @@ -806,7 +806,7 @@ captured_main (void *data) { print_gdb_version (gdb_stdout); wrap_here (""); - printf_filtered ("\n"); + printf_filtered ("\n\n"); exit (0); } @@ -853,6 +853,13 @@ captured_main (void *data) } } +#ifdef CRASH_MERGE +{ + extern void update_gdb_hooks(void); + update_gdb_hooks(); +} +#endif + /* FIXME: cagney/2003-02-03: The big hack (part 2 of 2) that lets GDB retain the old MI1 interpreter startup behavior. Output the copyright message after the interpreter is installed when it is @@ -880,7 +887,11 @@ captured_main (void *data) processed; it sets global parameters, which are independent of what file you are debugging or what directory you are in. 
*/ if (system_gdbinit && !inhibit_gdbinit) +#ifdef CRASH_MERGE + catch_command_errors (source_script, system_gdbinit, -1, RETURN_MASK_ALL); +#else catch_command_errors (source_script, system_gdbinit, 0, RETURN_MASK_ALL); +#endif /* Read and execute $HOME/.gdbinit file, if it exists. This is done *before* all the command line arguments are processed; it sets @@ -888,7 +899,11 @@ captured_main (void *data) debugging or what directory you are in. */ if (home_gdbinit && !inhibit_gdbinit && !inhibit_home_gdbinit) +#ifdef CRASH_MERGE + catch_command_errors (source_script, home_gdbinit, -1, RETURN_MASK_ALL); +#else catch_command_errors (source_script, home_gdbinit, 0, RETURN_MASK_ALL); +#endif /* Process '-ix' and '-iex' options early. */ for (i = 0; VEC_iterate (cmdarg_s, cmdarg_vec, i, cmdarg_p); i++) @@ -929,8 +944,12 @@ captured_main (void *data) catch_command_errors returns non-zero on success! */ if (catch_command_errors (exec_file_attach, execarg, !batch_flag, RETURN_MASK_ALL)) +#ifdef CRASH_MERGE + catch_command_errors (symbol_file_add_main, symarg, 0, RETURN_MASK_ALL); +#else catch_command_errors (symbol_file_add_main, symarg, !batch_flag, RETURN_MASK_ALL); +#endif } else { @@ -992,8 +1011,12 @@ captured_main (void *data) { auto_load_local_gdbinit_loaded = 1; +#ifdef CRASH_MERGE + catch_command_errors (source_script, local_gdbinit, -1, RETURN_MASK_ALL); +#else catch_command_errors (source_script, local_gdbinit, 0, RETURN_MASK_ALL); +#endif } } @@ -1039,6 +1062,12 @@ captured_main (void *data) while (1) { catch_errors (captured_command_loop, 0, "", RETURN_MASK_ALL); +#ifdef CRASH_MERGE + { + int console(char *, ...); + console("\n"); + } +#endif } /* No exit -- exit is through quit_command. 
*/ } @@ -1053,6 +1082,23 @@ gdb_main (struct captured_main_args *arg return 1; } +#ifdef CRASH_MERGE +/* + * NOTE: adapted from gdb.c, which is no longer built in; changed name of + * original main() to gdb_main_entry() for use as crash entry point + */ +int +gdb_main_entry (int argc, char **argv) +{ + struct captured_main_args args; + memset (&args, 0, sizeof args); + args.argc = argc; + args.argv = argv; + args.use_windows = 0; + args.interpreter_p = INTERP_CONSOLE; + return gdb_main (&args); +} +#endif /* Don't use *_filtered for printing help. We don't want to prompt for continue no matter how small the screen or how much we're going --- gdb-7.6/gdb/valprint.c.orig +++ gdb-7.6/gdb/valprint.c @@ -1768,8 +1768,13 @@ partial_memory_read (CORE_ADDR memaddr, this function instead? */ int +#ifdef CRASH_MERGE +valprint_read_string (CORE_ADDR addr, int len, int width, unsigned int fetchlimit, + enum bfd_endian byte_order, gdb_byte **buffer, int *bytes_read) +#else read_string (CORE_ADDR addr, int len, int width, unsigned int fetchlimit, enum bfd_endian byte_order, gdb_byte **buffer, int *bytes_read) +#endif { int found_nul; /* Non-zero if we found the nul char. */ int errcode; /* Errno returned from bad reads. */ @@ -2472,8 +2477,13 @@ val_print_string (struct type *elttype, fetchlimit = (len == -1 ? options->print_max : min (len, options->print_max)); +#ifdef CRASH_MERGE + errcode = valprint_read_string (addr, len, width, fetchlimit, byte_order, + &buffer, &bytes_read); +#else errcode = read_string (addr, len, width, fetchlimit, byte_order, &buffer, &bytes_read); +#endif old_chain = make_cleanup (xfree, buffer); addr += bytes_read; --- gdb-7.6/gdb/Makefile.in.orig +++ gdb-7.6/gdb/Makefile.in @@ -422,7 +422,7 @@ CONFIG_UNINSTALL = @CONFIG_UNINSTALL@ # It is also possible that you will need to add -I/usr/include/sys if # your system doesn't have fcntl.h in /usr/include (which is where it # should be according to Posix). 
-DEFS = @DEFS@ +DEFS = -DCRASH_MERGE @DEFS@ GDB_CFLAGS = -I. -I$(srcdir) -I$(srcdir)/common -I$(srcdir)/config \ -DLOCALEDIR="\"$(localedir)\"" $(DEFS) @@ -934,7 +934,7 @@ COMMON_OBS = $(DEPFILES) $(CONFIG_OBS) $ TSOBS = inflow.o -SUBDIRS = doc @subdirs@ data-directory $(GNULIB_BUILDDIR) +SUBDIRS = build_no_subdirs CLEANDIRS = $(SUBDIRS) # List of subdirectories in the build tree that must exist. @@ -969,8 +969,8 @@ generated_files = config.h observer.h ob $(COMPILE) $< $(POSTCOMPILE) -all: gdb$(EXEEXT) $(CONFIG_ALL) - @$(MAKE) $(FLAGS_TO_PASS) DO=all "DODIRS=`echo $(SUBDIRS) | sed 's/testsuite//'`" subdir_do +all: gdb$(EXEEXT) + @$(MAKE) -s $(FLAGS_TO_PASS) DO=all "DODIRS=`echo $(SUBDIRS) | sed 's/testsuite//'`" subdir_do installcheck: @@ -1172,15 +1172,16 @@ libgdb.a: $(LIBGDB_OBS) # Removing the old gdb first works better if it is running, at least on SunOS. gdb$(EXEEXT): gdb.o $(LIBGDB_OBS) $(ADD_DEPS) $(CDEPS) $(TDEPLIBS) - rm -f gdb$(EXEEXT) + @rm -f gdb$(EXEEXT) + @(cd ../..; make --no-print-directory GDB_FLAGS=-DGDB_7_6 library) $(CC_LD) $(INTERNAL_LDFLAGS) $(WIN32LDAPP) \ - -o gdb$(EXEEXT) gdb.o $(LIBGDB_OBS) \ - $(TDEPLIBS) $(TUI_LIBRARY) $(CLIBS) $(LOADLIBES) + -o $(shell /bin/cat mergeobj) $(LIBGDB_OBS) \ + $(TDEPLIBS) $(TUI_LIBRARY) $(CLIBS) $(LOADLIBES) $(shell /bin/cat mergelibs) # Convenience rule to handle recursion. $(LIBGNU) $(GNULIB_H): all-lib all-lib: $(GNULIB_BUILDDIR)/Makefile - @$(MAKE) $(FLAGS_TO_PASS) DO=all DODIRS=$(GNULIB_BUILDDIR) subdir_do + @$(MAKE) $(FLAGS_TO_PASS) DO=all DODIRS=$(GNULIB_BUILDDIR) subdir_do -s .PHONY: all-lib # Convenience rule to handle recursion. 
@@ -1389,12 +1390,12 @@ $(srcdir)/copying.c: @MAINTAINER_MODE_TR mv $(srcdir)/copying.tmp $(srcdir)/copying.c version.c: Makefile version.in - rm -f version.c-tmp version.c - echo '#include "version.h"' >> version.c-tmp - echo 'const char version[] = "'"`sed q ${srcdir}/version.in`"'";' >> version.c-tmp - echo 'const char host_name[] = "$(host_alias)";' >> version.c-tmp - echo 'const char target_name[] = "$(target_alias)";' >> version.c-tmp - mv version.c-tmp version.c + @rm -f version.c-tmp version.c + @echo '#include "version.h"' >> version.c-tmp + @echo 'const char version[] = "'"`sed q ${srcdir}/version.in`"'";' >> version.c-tmp + @echo 'const char host_name[] = "$(host_alias)";' >> version.c-tmp + @echo 'const char target_name[] = "$(target_alias)";' >> version.c-tmp + @mv version.c-tmp version.c observer.h: observer.sh doc/observer.texi ${srcdir}/observer.sh h ${srcdir}/doc/observer.texi observer.h --- gdb-7.6/gdb/c-lang.c.orig +++ gdb-7.6/gdb/c-lang.c @@ -307,7 +307,11 @@ c_get_string (struct value *value, gdb_b { CORE_ADDR addr = value_as_address (value); +#ifdef CRASH_MERGE + err = valprint_read_string (addr, *length, width, fetchlimit, +#else err = read_string (addr, *length, width, fetchlimit, +#endif byte_order, buffer, length); if (err) { --- gdb-7.6/readline/rltypedefs.h.orig +++ gdb-7.6/readline/rltypedefs.h @@ -31,10 +31,10 @@ extern "C" { #if !defined (_FUNCTION_DEF) # define _FUNCTION_DEF -typedef int Function (); -typedef void VFunction (); -typedef char *CPFunction (); -typedef char **CPPFunction (); +typedef int Function (void); +typedef void VFunction (void); +typedef char *CPFunction (void); +typedef char **CPPFunction (void); #endif /* _FUNCTION_DEF */ --- gdb-7.6/readline/readline.h.orig +++ gdb-7.6/readline/readline.h @@ -378,7 +378,7 @@ extern int rl_crlf PARAMS((void)); #if defined (USE_VARARGS) && defined (PREFER_STDARG) extern int rl_message (const char *, ...) 
__attribute__((__format__ (printf, 1, 2))); #else -extern int rl_message (); +extern int rl_message (void); #endif extern int rl_show_char PARAMS((int)); --- gdb-7.6/readline/misc.c.orig +++ gdb-7.6/readline/misc.c @@ -405,7 +405,7 @@ _rl_history_set_point () #if defined (VI_MODE) if (rl_editing_mode == vi_mode && _rl_keymap != vi_insertion_keymap) - rl_point = 0; + rl_point = rl_end; #endif /* VI_MODE */ if (rl_editing_mode == emacs_mode) --- gdb-7.6/Makefile.in.orig +++ gdb-7.6/Makefile.in @@ -342,6 +342,9 @@ AR_FOR_BUILD = @AR_FOR_BUILD@ AS_FOR_BUILD = @AS_FOR_BUILD@ CC_FOR_BUILD = @CC_FOR_BUILD@ CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@ +ifeq (${CRASH_TARGET}, PPC64) +CFLAGS_FOR_BUILD += -m64 -fPIC +endif CXXFLAGS_FOR_BUILD = @CXXFLAGS_FOR_BUILD@ CXX_FOR_BUILD = @CXX_FOR_BUILD@ DLLTOOL_FOR_BUILD = @DLLTOOL_FOR_BUILD@ @@ -407,6 +410,9 @@ GNATBIND = @GNATBIND@ GNATMAKE = @GNATMAKE@ CFLAGS = @CFLAGS@ +ifeq (${CRASH_TARGET}, PPC64) +CFLAGS += -m64 -fPIC +endif LDFLAGS = @LDFLAGS@ LIBCFLAGS = $(CFLAGS) CXXFLAGS = @CXXFLAGS@ --- gdb-7.6/gdb/defs.h.orig +++ gdb-7.6/gdb/defs.h @@ -802,4 +802,8 @@ enum block_enum #include "utils.h" +#ifdef CRASH_MERGE +extern int gdb_main_entry(int, char **); +extern void replace_ui_file_FILE(struct ui_file *, FILE *); +#endif #endif /* #ifndef DEFS_H */ --- gdb-7.6/bfd/elflink.c.orig +++ gdb-7.6/bfd/elflink.c @@ -4730,7 +4730,7 @@ error_free_dyn: struct elf_link_hash_entry *hlook; asection *slook; bfd_vma vlook; - size_t i, j, idx; + size_t i, j, idx = 0; hlook = weaks; weaks = hlook->u.weakdef; --- gdb-7.6/gdb/s390-nat.c.orig +++ gdb-7.6/gdb/s390-nat.c @@ -37,6 +37,8 @@ #include #include +#include + #ifndef HWCAP_S390_HIGH_GPRS #define HWCAP_S390_HIGH_GPRS 512 #endif --- gdb-7.6/gdb/printcmd.c.orig +++ gdb-7.6/gdb/printcmd.c @@ -573,11 +573,21 @@ print_address_symbolic (struct gdbarch * int unmapped = 0; int offset = 0; int line = 0; +#ifdef CRASH_MERGE + extern int gdb_print_callback(unsigned long); +#endif /* Throw away both name and 
filename. */ struct cleanup *cleanup_chain = make_cleanup (free_current_contents, &name); make_cleanup (free_current_contents, &filename); +#ifdef CRASH_MERGE + if (!gdb_print_callback(addr)) { + do_cleanups (cleanup_chain); + return 0; + } +#endif + if (build_address_symbolic (gdbarch, addr, do_demangle, &name, &offset, &filename, &line, &unmapped)) { --- gdb-7.6/bfd/bfd-in.h.orig +++ gdb-7.6/bfd/bfd-in.h @@ -294,9 +294,6 @@ typedef struct bfd_section *sec_ptr; #define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) -#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) -#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) -#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) /* Find the address one past the end of SEC. */ #define bfd_get_section_limit(bfd, sec) \ (((bfd)->direction != write_direction && (sec)->rawsize != 0 \ @@ -519,7 +516,6 @@ extern void warn_deprecated (const char #define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) -#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) extern bfd_boolean bfd_cache_close (bfd *abfd); --- gdb-7.6/bfd/bfd-in2.h.orig +++ gdb-7.6/bfd/bfd-in2.h @@ -301,9 +301,6 @@ typedef struct bfd_section *sec_ptr; #define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0) -#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE) -#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE) -#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE) /* Find the address one past the end of SEC. 
*/ #define bfd_get_section_limit(bfd, sec) \ (((bfd)->direction != write_direction && (sec)->rawsize != 0 \ @@ -526,7 +523,6 @@ extern void warn_deprecated (const char #define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char) -#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE) extern bfd_boolean bfd_cache_close (bfd *abfd); @@ -1572,6 +1568,32 @@ struct relax_table { int size; }; +/* Note: the following are provided as inline functions rather than macros + because not all callers use the return value. A macro implementation + would use a comma expression, eg: "((ptr)->foo = val, TRUE)" and some + compilers will complain about comma expressions that have no effect. */ +static inline bfd_boolean +bfd_set_section_userdata (bfd * abfd ATTRIBUTE_UNUSED, asection * ptr, void * val) +{ + ptr->userdata = val; + return TRUE; +} + +static inline bfd_boolean +bfd_set_section_vma (bfd * abfd ATTRIBUTE_UNUSED, asection * ptr, bfd_vma val) +{ + ptr->vma = ptr->lma = val; + ptr->user_set_vma = TRUE; + return TRUE; +} + +static inline bfd_boolean +bfd_set_section_alignment (bfd * abfd ATTRIBUTE_UNUSED, asection * ptr, unsigned int val) +{ + ptr->alignment_power = val; + return TRUE; +} + /* These sections are global, and are managed by BFD. The application and target back end are not permitted to change the values in these sections. */ @@ -6095,6 +6117,14 @@ struct bfd unsigned int selective_search : 1; }; +/* See note beside bfd_set_section_userdata. 
*/ +static inline bfd_boolean +bfd_set_cacheable (bfd * abfd, bfd_boolean val) +{ + abfd->cacheable = val; + return TRUE; +} + typedef enum bfd_error { bfd_error_no_error = 0, --- gdb-7.6/gdb/symtab.c.orig +++ gdb-7.6/gdb/symtab.c @@ -5405,7 +5405,7 @@ dump_enum(struct type *type, struct gnu_ { register int i; int len; - int lastval; + long long lastval; len = TYPE_NFIELDS (type); lastval = 0; @@ -5418,12 +5418,12 @@ dump_enum(struct type *type, struct gnu_ for (i = 0; i < len; i++) { fprintf_filtered(gdb_stdout, " %s", TYPE_FIELD_NAME (type, i)); - if (lastval != TYPE_FIELD_BITPOS (type, i)) { - fprintf_filtered (gdb_stdout, " = %d", - TYPE_FIELD_BITPOS (type, i)); - lastval = TYPE_FIELD_BITPOS (type, i); + if (lastval != TYPE_FIELD_ENUMVAL (type, i)) { + fprintf_filtered (gdb_stdout, " = %s", + plongest(TYPE_FIELD_ENUMVAL (type, i))); + lastval = TYPE_FIELD_ENUMVAL (type, i); } else - fprintf_filtered(gdb_stdout, " = %d", lastval); + fprintf_filtered(gdb_stdout, " = %s", plongest(lastval)); fprintf_filtered(gdb_stdout, "\n"); lastval++; } --- gdb-7.6/gdb/aarch64-linux-nat.c.orig +++ gdb-7.6/gdb/aarch64-linux-nat.c @@ -32,6 +32,7 @@ #include "elf/common.h" #include +#include #include #include "gregset.h" --- gdb-7.6/sim/igen/Makefile.in.orig +++ gdb-7.6/sim/igen/Makefile.in @@ -117,7 +117,7 @@ IGEN_OBJS=\ gen.o igen: igen.o $(IGEN_OBJS) - $(CC_FOR_BUILD) $(BUILD_LDFLAGS) -o igen igen.o $(IGEN_OBJS) $(LIBIBERTY_LIB) + $(CC_FOR_BUILD) $(BUILD_CFLAGS) $(BUILD_LDFLAGS) -o igen igen.o $(IGEN_OBJS) $(LIBIBERTY_LIB) igen.o: igen.c misc.h filter_host.h lf.h table.h ld-decode.h ld-cache.h ld-insn.h filter.h gen-model.h gen-itable.h gen-icache.h gen-idecode.h gen-engine.h gen-semantics.h gen-support.h gen.h igen.h $(CC_FOR_BUILD) $(BUILD_CFLAGS) -c $(srcdir)/igen.c --- gdb-7.6/sim/mips/cp1.c.orig +++ gdb-7.6/sim/mips/cp1.c @@ -1359,7 +1359,7 @@ fp_rsqrt2(sim_cpu *cpu, /* Conversion operations. 
*/ uword64 -convert (sim_cpu *cpu, +sim_mips_convert (sim_cpu *cpu, address_word cia, int rm, uword64 op, --- gdb-7.6/sim/mips/sim-main.h.orig +++ gdb-7.6/sim/mips/sim-main.h @@ -770,8 +770,8 @@ unsigned64 fp_nmadd (SIM_STATE, unsigned64 op1, unsigned64 op2, unsigned64 fp_nmsub (SIM_STATE, unsigned64 op1, unsigned64 op2, unsigned64 op3, FP_formats fmt); #define NegMultiplySub(op1,op2,op3,fmt) fp_nmsub(SIM_ARGS, op1, op2, op3, fmt) -unsigned64 convert (SIM_STATE, int rm, unsigned64 op, FP_formats from, FP_formats to); -#define Convert(rm,op,from,to) convert (SIM_ARGS, rm, op, from, to) +unsigned64 sim_mips_convert (SIM_STATE, int rm, unsigned64 op, FP_formats from, FP_formats to); +#define Convert(rm,op,from,to) sim_mips_convert (SIM_ARGS, rm, op, from, to) unsigned64 convert_ps (SIM_STATE, int rm, unsigned64 op, FP_formats from, FP_formats to); #define ConvertPS(rm,op,from,to) convert_ps (SIM_ARGS, rm, op, from, to) --- gdb-7.6/readline/util.c +++ gdb-7.6/readline/util.c @@ -493,10 +493,13 @@ _rl_trace (va_alist) if (_rl_tracefp == 0) _rl_tropen (); + if (!_rl_tracefp) + goto out; vfprintf (_rl_tracefp, format, args); fprintf (_rl_tracefp, "\n"); fflush (_rl_tracefp); +out: va_end (args); } @@ -509,16 +512,17 @@ _rl_tropen () fclose (_rl_tracefp); sprintf (fnbuf, "/var/tmp/rltrace.%ld", getpid()); unlink(fnbuf); - _rl_tracefp = fopen (fnbuf, "w+"); + _rl_tracefp = fopen (fnbuf, "w+xe"); return _rl_tracefp != 0; } int _rl_trclose () { - int r; + int r = 0; - r = fclose (_rl_tracefp); + if (_rl_tracefp) + r = fclose (_rl_tracefp); _rl_tracefp = 0; return r; } --- gdb-7.6/gdb/symtab.c.orig +++ gdb-7.6/gdb/symtab.c @@ -5447,9 +5447,9 @@ eval_enum(struct type *type, struct gnu_ lastval = 0; for (i = 0; i < len; i++) { - if (lastval != TYPE_FIELD_BITPOS (type, i)) { - lastval = TYPE_FIELD_BITPOS (type, i); - } + if (lastval != TYPE_FIELD_ENUMVAL (type, i)) + lastval = TYPE_FIELD_ENUMVAL (type, i); + if (STREQ(TYPE_FIELD_NAME(type, i), req->name)) { req->tagname = 
"(unknown)"; req->value = lastval; --- gdb-7.6/gdb/symtab.c.orig +++ gdb-7.6/gdb/symtab.c @@ -5236,6 +5236,12 @@ gdb_command_funnel(struct gnu_request *r gdb_set_crash_block(req); break; + case GNU_GET_FUNCTION_RANGE: + sym = lookup_symbol(req->name, 0, VAR_DOMAIN, 0); + if (!find_pc_partial_function(req->pc, NULL, &req->addr, &req->addr2)) + req->flags |= GNU_COMMAND_FAILED; + break; + default: req->flags |= GNU_COMMAND_FAILED; break; --- gdb-7.6/opcodes/i386-dis.c.orig +++ gdb-7.6/opcodes/i386-dis.c @@ -11300,6 +11300,29 @@ get_sib (disassemble_info *info) } } +static char * +check_for_extensions(struct dis_private *priv) +{ + unsigned char ModRM; + + if ((priv->the_buffer[0] == 0x66) && + (priv->the_buffer[1] == 0x0f) && + (priv->the_buffer[2] == 0xae)) { + ModRM = priv->the_buffer[3]; + if (ModRM == 0xf8) + return "pcommit"; + + switch ((ModRM >> 3)) + { + case 0x6: + return "clwb"; + case 0x7: + return "clflushopt"; + } + } + return NULL; +} + static int print_insn (bfd_vma pc, disassemble_info *info) { @@ -11312,6 +11335,7 @@ print_insn (bfd_vma pc, disassemble_info struct dis_private priv; int prefix_length; int default_prefixes; + char *extension; priv.orig_sizeflag = AFLAG | DFLAG; if ((info->mach & bfd_mach_i386_i386) != 0) @@ -11575,6 +11599,7 @@ print_insn (bfd_vma pc, disassemble_info need_vex = 0; need_vex_reg = 0; vex_w_done = 0; + extension = NULL; if (dp->name == NULL && dp->op[0].bytemode == FLOATCODE) { @@ -11610,9 +11635,14 @@ print_insn (bfd_vma pc, disassemble_info name = prefix_name (all_prefixes[i], priv.orig_sizeflag); if (name == NULL) name = INTERNAL_DISASSEMBLER_ERROR; - (*info->fprintf_func) (info->stream, "%s", name); - return 1; - } + if ((extension = check_for_extensions(&priv))) { + strcpy(obuf, extension); + obufp = &obuf[strlen(obuf)]; + } else { + (*info->fprintf_func) (info->stream, "%s", name); + return 1; + } + } } /* Check if the REX prefix is used. 
*/ @@ -11637,7 +11667,7 @@ print_insn (bfd_vma pc, disassemble_info all_prefixes[last_data_prefix] = 0; prefix_length = 0; - for (i = 0; i < (int) ARRAY_SIZE (all_prefixes); i++) + for (i = 0; !extension && i < (int) ARRAY_SIZE (all_prefixes); i++) if (all_prefixes[i]) { const char *name; @@ -11655,7 +11685,8 @@ print_insn (bfd_vma pc, disassemble_info return MAX_CODE_LENGTH; } - obufp = mnemonicendp; + if (!extension) + obufp = mnemonicendp; for (i = strlen (obuf) + prefix_length; i < 6; i++) oappend (" "); oappend (" "); crash-7.1.4/vas_crash.h0000775000000000000000000001005212634305150013445 0ustar rootroot/* vas_crash.h - kernel crash dump file format (on swap) * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 10/99, Dave Winchell, Initial release for kernel crash dump support. * 11/12/99, Dave Winchell, Add support for in memory dumps. 
*/ #include //#include void save_core(void); /* struct crash_map_hdr located at byte offset 0 */ /* on-disk formats */ #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(Page_Size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(Page_Size - 1))) #define CRASH_MAGIC 0x9a8bccdd #define CRASH_SOURCE_PAGES 128 #define CRASH_SUB_MAP_BYTES ((u_long)round_page((CRASH_SOURCE_PAGES+1)*sizeof(u_long))) #define CRASH_SUB_MAP_PAGES (CRASH_SUB_MAP_BYTES / Page_Size) #define CRASH_UNCOMPR_BUF_PAGES (CRASH_SOURCE_PAGES + CRASH_SUB_MAP_PAGES) #define CRASH_COMPR_BUF_PAGES (CRASH_UNCOMPR_BUF_PAGES + (CRASH_UNCOMPR_BUF_PAGES/4)) #define CRASH_COMPESS_PRIME_PAGES (2*CRASH_COMPR_BUF_PAGES) #define CRASH_ZALLOC_PAGES 16*5*2 /* 2 to handle crash in crash */ #define CRASH_LOW_WATER_PAGES 100 #define HP_BIOS_HIGH_PAGES_USED 2000 #define CRASH_MARK_RESERVED(addr) (set_bit(PG_reserved,&mem_map[MAP_NR(addr)].flags)) #define CRASH_CLEAR_RESERVED(addr) (clear_bit(PG_reserved,&mem_map[MAP_NR(addr)].flags)) #ifdef NOT_DEF typedef int boolean_t; #endif #define TRUE 1 #define FALSE 0 /* mem structure */ struct mem_crash_map_hdr { long magic[4]; /* identify crash dump */ u_long map; /* location of map */ u_long map_pages; u_long data_pages; u_long compr_units; }; struct mem_crash_map_entry { u_long src_va; /* source start of larger non-contig block */ /* a src_va of -1 means that the dest_page_va * is the location of the next map page */ u_long dest_page_va; /* dest of this sub block */ u_long check_sum; /* check_sum for dest data */ }; /* file structure */ struct crash_map_hdr { long magic[4]; /* identify crash dump */ int blk_size; /* block size for this device */ int map_block; /* location of map */ int map_blocks; /* number of blocks for map */ }; struct crash_map_entry { u_long start_va; /* virtual address */ char *exp_data; /* expanded data in memory */ int start_blk; /* device location */ int num_blks; }; #define CRASH_OFFSET_BLKS 100 
#define CRASH_MAGIC 0x9a8bccdd struct crash_map_hdr_v1 { long magic[4]; /* identify crash dump */ int blk_size; /* block size for this device */ int map_block; /* location of map */ int map_blocks; /* number of blocks for map */ int map_entries; u_long va_per_entry; /* va covered by each map_entry */ u_long bytes_not_dumped; /* ran out of swap space */ int total_blocks; /* CRASH_OFFSET_BLKS + header + map + data */ }; struct crash_map_entry_v1 { u_long start_va; /* virtual address */ char *exp_data; /* expanded data in memory */ int start_blk; /* device location */ int num_blks; int chk_sum; /* check sum */ }; crash-7.1.4/defs.h0000775000000000000000000057772112634305150012442 0ustar rootroot/* defs.h - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifndef GDB_COMMON #include #include #include #include #include #include #include #undef basename #if !defined(__USE_GNU) #define __USE_GNU #include #undef __USE_GNU #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* backtrace() */ #include #ifdef LZO #include #endif #ifdef SNAPPY #include #endif #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif #undef TRUE #undef FALSE #define TRUE (1) #define FALSE (0) #define STR(x) #x #ifndef offsetof # define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) #endif #if !defined(X86) && !defined(X86_64) && !defined(ALPHA) && !defined(PPC) && \ !defined(IA64) && !defined(PPC64) && !defined(S390) && !defined(S390X) && \ !defined(ARM) && !defined(ARM64) && !defined(MIPS) #ifdef __alpha__ #define ALPHA #endif #ifdef __i386__ #define X86 #endif #ifdef __powerpc64__ #define PPC64 #else #ifdef __powerpc__ #define PPC #endif #endif #ifdef __ia64__ #define IA64 #endif #ifdef __s390__ #define S390 #endif #ifdef __s390x__ #define S390X #endif #ifdef __x86_64__ #define X86_64 #endif #ifdef __arm__ #define ARM #endif #ifdef __aarch64__ #define ARM64 #endif #ifdef __mipsel__ #define MIPS #endif #endif #ifdef X86 #define NR_CPUS (256) #endif #ifdef X86_64 #define NR_CPUS (8192) #endif #ifdef ALPHA #define NR_CPUS (64) #endif #ifdef PPC #define NR_CPUS (32) #endif #ifdef IA64 #define NR_CPUS (4096) #endif #ifdef PPC64 #define NR_CPUS (2048) #endif #ifdef S390 #define NR_CPUS (512) #endif #ifdef S390X #define NR_CPUS (512) #endif #ifdef ARM #define NR_CPUS (32) #endif #ifdef ARM64 #define NR_CPUS (4096) /* TBD */ #endif #ifdef MIPS #define NR_CPUS (32) #endif #define BUFSIZE (1500) #define NULLCHAR ('\0') #define MAXARGS (100) /* max number of arguments to one function */ #define MAXARGLEN (40) /* max length of argument */ #define HIST_BLKSIZE (4096) static inline int string_exists(char *s) { return (s ? 
TRUE : FALSE); } #define STREQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strcmp((char *)(A), (char *)(B)) == 0)) #define STRNEQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strncmp((char *)(A), (char *)(B), strlen((char *)(B))) == 0)) #define BZERO(S, N) (memset(S, NULLCHAR, N)) #define BCOPY(S, D, C) (memcpy(D, S, C)) #define BNEG(S, N) (memset(S, 0xff, N)) #define BEEP() fprintf(stderr, "%c", 0x7) #define LASTCHAR(s) (s[strlen(s)-1]) #define FIRSTCHAR(s) (s[0]) #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) #define SINGLE_QUOTED_STRING(s) ((FIRSTCHAR(s) == '\'') && (LASTCHAR(s) == '\'')) #define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) #ifdef roundup #undef roundup #endif #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) typedef uint64_t physaddr_t; #define PADDR_NOT_AVAILABLE (0x1ULL) typedef unsigned long long int ulonglong; struct number_option { ulong num; ulonglong ll_num; ulong retflags; }; /* * program_context flags */ #define LIVE_SYSTEM (0x1ULL) #define TTY (0x2ULL) #define RUNTIME (0x4ULL) #define IN_FOREACH (0x8ULL) #define MCLXCD (0x10ULL) #define CMDLINE_IFILE (0x20ULL) #define MFD_RDWR (0x40ULL) #define KVMDUMP (0x80ULL) #define SILENT (0x100ULL) #define SADUMP (0x200ULL) #define HASH (0x400ULL) #define SCROLL (0x800ULL) #define NO_CONSOLE (0x1000ULL) #define RUNTIME_IFILE (0x2000ULL) #define DROP_CORE (0x4000ULL) #define LKCD (0x8000ULL) #define GDB_INIT (0x10000ULL) #define IN_GDB (0x20000ULL) #define RCLOCAL_IFILE (0x40000ULL) #define RCHOME_IFILE (0x80000ULL) #define VMWARE_VMSS (0x100000ULL) #define READLINE (0x200000ULL) #define _SIGINT_ (0x400000ULL) #define IN_RESTART (0x800000ULL) #define KERNEL_DEBUG_QUERY (0x1000000ULL) #define DEVMEM (0x2000000ULL) #define REM_LIVE_SYSTEM (0x4000000ULL) #define NAMELIST_LOCAL (0x8000000ULL) #define MEMSRC_LOCAL (0x10000000ULL) #define NAMELIST_SAVED (0x20000000ULL) #define DUMPFILE_SAVED (0x40000000ULL) 
#define UNLINK_NAMELIST (0x80000000ULL) #define NAMELIST_UNLINKED (0x100000000ULL) #define REM_MCLXCD (0x200000000ULL) #define REM_LKCD (0x400000000ULL) #define NAMELIST_NO_GZIP (0x800000000ULL) #define UNLINK_MODULES (0x1000000000ULL) #define S390D (0x2000000000ULL) #define REM_S390D (0x4000000000ULL) #define SYSRQ (0x8000000000ULL) #define KDUMP (0x10000000000ULL) #define NETDUMP (0x20000000000ULL) #define REM_NETDUMP (0x40000000000ULL) #define SYSMAP (0x80000000000ULL) #define SYSMAP_ARG (0x100000000000ULL) #define MEMMOD (0x200000000000ULL) #define MODPRELOAD (0x400000000000ULL) #define DISKDUMP (0x800000000000ULL) #define DATADEBUG (0x1000000000000ULL) #define FINDKERNEL (0x2000000000000ULL) #define VERSION_QUERY (0x4000000000000ULL) #define READNOW (0x8000000000000ULL) #define NOCRASHRC (0x10000000000000ULL) #define INIT_IFILE (0x20000000000000ULL) #define XENDUMP (0x40000000000000ULL) #define XEN_HYPER (0x80000000000000ULL) #define XEN_CORE (0x100000000000000ULL) #define PLEASE_WAIT (0x200000000000000ULL) #define IFILE_ERROR (0x400000000000000ULL) #define KERNTYPES (0x800000000000000ULL) #define MINIMAL_MODE (0x1000000000000000ULL) #define CRASHBUILTIN (0x2000000000000000ULL) #define PRELOAD_EXTENSIONS \ (0x4000000000000000ULL) #define PROC_KCORE (0x8000000000000000ULL) #define ACTIVE() (pc->flags & LIVE_SYSTEM) #define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) #define LIVE() (pc->flags2 & LIVE_DUMP || pc->flags & LIVE_SYSTEM) #define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP|CRASHBUILTIN|KVMDUMP|PROC_KCORE|SADUMP|VMWARE_VMSS) #define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP|KVMDUMP|SADUMP|VMWARE_VMSS) #define REMOTE() (pc->flags2 & REMOTE_DAEMON) #define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) #define REMOTE_DUMPFILE() \ (pc->flags & (REM_NETDUMP|REM_MCLXCD|REM_LKCD|REM_S390D)) #define REMOTE_MEMSRC() (REMOTE_ACTIVE() || REMOTE_PAUSED() || REMOTE_DUMPFILE()) #define LKCD_DUMPFILE() (pc->flags 
& (LKCD|REM_LKCD)) #define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) #define KDUMP_DUMPFILE() (pc->flags & KDUMP) #define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) #define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) #define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) #define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) #define LKCD_KERNTYPES() (pc->flags & KERNTYPES) #define KVMDUMP_DUMPFILE() (pc->flags & KVMDUMP) #define SADUMP_DUMPFILE() (pc->flags & SADUMP) #define NETDUMP_LOCAL (0x1) /* netdump_data flags */ #define NETDUMP_REMOTE (0x2) #define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) #define NETDUMP_ELF32 (0x4) #define NETDUMP_ELF64 (0x8) #define PARTIAL_DUMP (0x10) /* netdump or diskdump */ #define KDUMP_ELF32 (0x20) #define KDUMP_ELF64 (0x40) #define KDUMP_LOCAL (0x80) #define KCORE_LOCAL (0x100) #define KCORE_ELF32 (0x200) #define KCORE_ELF64 (0x400) #define QEMU_MEM_DUMP_KDUMP_BACKUP \ (0x800) #define KVMDUMP_LOCAL (0x1) #define KVMDUMP_VALID() (kvm->flags & (KVMDUMP_LOCAL)) #define DUMPFILE_FORMAT(flags) ((flags) & \ (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) #define DISKDUMP_LOCAL (0x1) #define KDUMP_CMPRS_LOCAL (0x2) #define ERROR_EXCLUDED (0x4) #define ZERO_EXCLUDED (0x8) #define DUMPFILE_SPLIT (0x10) #define NO_ELF_NOTES (0x20) #define LZO_SUPPORTED (0x40) #define SNAPPY_SUPPORTED (0x80) #define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) #define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) #define KDUMP_SPLIT() (dd->flags & DUMPFILE_SPLIT) #define XENDUMP_LOCAL (0x1) #define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) #define SADUMP_LOCAL (0x1) #define SADUMP_DISKSET (0x2) #define SADUMP_MEDIA (0x4) #define SADUMP_ZERO_EXCLUDED (0x8) #define SADUMP_KDUMP_BACKUP (0x10) #define SADUMP_VALID() (sd->flags & SADUMP_LOCAL) #define CRASHDEBUG(x) (pc->debug >= (x)) #define CRASHDEBUG_SUSPEND(X) { pc->debug_save = pc->debug; pc->debug = 
X; } #define CRASHDEBUG_RESTORE() { pc->debug = pc->debug_save; } #define VERBOSE (0x1) #define ADDRESS_SPECIFIED (0x2) #define FAULT_ON_ERROR (0x1) #define RETURN_ON_ERROR (0x2) #define QUIET (0x4) #define HEX_BIAS (0x8) #define LONG_LONG (0x10) #define RETURN_PARTIAL (0x20) #define NO_DEVMEM_SWITCH (0x40) #define SEEK_ERROR (-1) #define READ_ERROR (-2) #define WRITE_ERROR (-3) #define PAGE_EXCLUDED (-4) #define RESTART() (longjmp(pc->main_loop_env, 1)) #define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) #define INFO (1) #define FATAL (2) #define FATAL_RESTART (3) #define WARNING (4) #define NOTE (5) #define CONT (6) #define FATAL_ERROR(x) (((x) == FATAL) || ((x) == FATAL_RESTART)) #define CONSOLE_OFF(x) ((x) = console_off()) #define CONSOLE_ON(x) (console_on(x)) #define RADIX(X) (X) #define NUM_HEX (0x1) #define NUM_DEC (0x2) #define NUM_EXPR (0x4) #define NUM_ANY (NUM_HEX|NUM_DEC|NUM_EXPR) /* * program context redirect flags */ #define FROM_COMMAND_LINE (0x1) #define FROM_INPUT_FILE (0x2) #define REDIRECT_NOT_DONE (0x4) #define REDIRECT_TO_PIPE (0x8) #define REDIRECT_TO_STDPIPE (0x10) #define REDIRECT_TO_FILE (0x20) #define REDIRECT_FAILURE (0x40) #define REDIRECT_SHELL_ESCAPE (0x80) #define REDIRECT_SHELL_COMMAND (0x100) #define REDIRECT_PID_KNOWN (0x200) #define REDIRECT_MULTI_PIPE (0x400) #define PIPE_OPTIONS (FROM_COMMAND_LINE | FROM_INPUT_FILE | REDIRECT_TO_PIPE | \ REDIRECT_TO_STDPIPE | REDIRECT_TO_FILE) #define DEFAULT_REDHAT_DEBUG_LOCATION "/usr/lib/debug/lib/modules" #define MEMORY_DRIVER_MODULE "crash" #define MEMORY_DRIVER_DEVICE "/dev/crash" #define MEMORY_DRIVER_DEVICE_MODE (S_IFCHR|S_IRUSR) /* * structure definitions */ struct program_context { char *program_name; /* this program's name */ char *program_path; /* unadulterated argv[0] */ char *program_version; /* this program's version */ char *gdb_version; /* embedded gdb version */ char *prompt; /* this program's prompt */ unsigned long long flags; /* flags from above */ char *namelist; /* 
linux namelist */ char *dumpfile; /* dumpfile or /dev/kmem */ char *live_memsrc; /* live memory driver */ char *system_map; /* get symbol values from System.map */ char *namelist_debug; /* namelist containing debug data */ char *debuginfo_file; /* separate debuginfo file */ char *memory_module; /* alternative to mem.c driver */ char *memory_device; /* alternative to /dev/[k]mem device */ char *machine_type; /* machine's processor type */ char *editing_mode; /* readline vi or emacs */ char *server; /* network daemon */ char *server_memsrc; /* memory source on server */ char *server_namelist; /* kernel namelist on server */ int nfd; /* linux namelist fd */ int mfd; /* /dev/mem fd */ int kfd; /* /dev/kmem fd */ int dfd; /* dumpfile fd */ int confd; /* console fd */ int sockfd; /* network daemon socket */ ushort port; /* network daemon port */ int rmfd; /* remote server memory source fd */ int rkfd; /* remote server /dev/kmem fd */ ulong program_pid; /* program pid */ ulong server_pid; /* server pid */ ulong rcvbufsize; /* client-side receive buffer size */ char *home; /* user's home directory */ char command_line[BUFSIZE]; /* possibly parsed input command line */ char orig_line[BUFSIZE]; /* original input line */ char *readline; /* pointer to last readline() return */ char my_tty[10]; /* real tty name (shown by ps -ef) */ ulong debug; /* level of debug */ ulong debug_save; /* saved level for debug-suspend */ char *console; /* current debug console device */ char *redhat_debug_loc; /* location of matching debug objects */ int pipefd[2]; /* output pipe file descriptors */ FILE *nullfp; /* bitbucket */ FILE *stdpipe; /* standard pipe for output */ FILE *pipe; /* command line specified pipe */ FILE *ofile; /* command line specified output file */ FILE *ifile; /* command line specified input file */ FILE *ifile_pipe; /* output pipe specified from file */ FILE *ifile_ofile; /* output file specified from file */ FILE *symfile; /* symbol table data file */ FILE *symfile2; /* 
alternate access to above */ FILE *tmpfile; /* tmpfile for selective data output */ FILE *saved_fp; /* for printing while parsing tmpfile */ FILE *tmp_fp; /* stored tmpfile pointer */ char *input_file; /* input file specified at invocation */ FILE *tmpfile2; /* tmpfile2 does not use save_fp! */ int eoc_index; /* end of redirected command index */ int scroll_command; /* default scroll command for output */ #define SCROLL_NONE 0 #define SCROLL_LESS 1 #define SCROLL_MORE 2 #define SCROLL_CRASHPAGER 3 ulong redirect; /* per-cmd origin and output flags */ pid_t stdpipe_pid; /* per-cmd standard output pipe's pid */ pid_t pipe_pid; /* per-cmd output pipe's pid */ pid_t pipe_shell_pid; /* per-cmd output pipe's shell pid */ char pipe_command[BUFSIZE]; /* pipe command line */ struct command_table_entry *cmd_table; /* linux/xen command table */ char *curcmd; /* currently-executing command */ char *lastcmd; /* previously-executed command */ ulong cmdgencur; /* current command generation number */ ulong curcmd_flags; /* general purpose per-command flag */ #define XEN_MACHINE_ADDR (0x1) #define REPEAT (0x2) #define IDLE_TASK_SHOWN (0x4) #define TASK_SPECIFIED (0x8) #define MEMTYPE_UVADDR (0x10) #define MEMTYPE_FILEADDR (0x20) #define HEADER_PRINTED (0x40) #define BAD_INSTRUCTION (0x80) #define UD2A_INSTRUCTION (0x100) #define IRQ_IN_USE (0x200) #define NO_MODIFY (0x400) #define IGNORE_ERRORS (0x800) #define FROM_RCFILE (0x1000) #define MEMTYPE_KVADDR (0x2000) #define MOD_SECTIONS (0x4000) #define MOD_READNOW (0x8000) #define MM_STRUCT_FORCE (0x10000) #define CPUMASK (0x20000) #define PARTIAL_READ_OK (0x40000) ulonglong curcmd_private; /* general purpose per-command info */ int cur_gdb_cmd; /* current gdb command */ int last_gdb_cmd; /* previously-executed gdb command */ int sigint_cnt; /* number of ignored SIGINTs */ struct gnu_request *cur_req; /* current gdb gnu_request */ struct sigaction sigaction; /* general usage sigaction. 
*/ struct sigaction gdb_sigaction; /* gdb's SIGINT sigaction. */ jmp_buf main_loop_env; /* longjmp target default */ jmp_buf foreach_loop_env; /* longjmp target within foreach */ jmp_buf gdb_interface_env; /* longjmp target for gdb error catch */ struct termios termios_orig; /* non-raw settings */ struct termios termios_raw; /* while gathering command input */ int ncmds; /* number of commands in menu */ char **cmdlist; /* current list of available commands */ int cmdlistsz; /* space available in cmdlist */ unsigned output_radix; /* current gdb output_radix */ void *sbrk; /* current sbrk value */ struct extension_table *curext; /* extension being loaded */ int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */ int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */ ulong ifile_in_progress; /* original xxx_IFILE flags */ off_t ifile_offset; /* current offset into input file */ char *runtime_ifile_cmd; /* runtime command using input file */ char *kvmdump_mapfile; /* storage of physical to file offsets */ ulonglong flags2; /* flags overrun */ #define FLAT (0x01ULL) #define ELF_NOTES (0x02ULL) #define GET_OSRELEASE (0x04ULL) #define REMOTE_DAEMON (0x08ULL) #define ERASEINFO_DATA (0x10ULL) #define GDB_CMD_MODE (0x20ULL) #define LIVE_DUMP (0x40ULL) #define FLAT_FORMAT() (pc->flags2 & FLAT) #define ELF_NOTES_VALID() (pc->flags2 & ELF_NOTES) #define RADIX_OVERRIDE (0x80ULL) #define QEMU_MEM_DUMP_ELF (0x100ULL) #define GET_LOG (0x200ULL) #define VMCOREINFO (0x400ULL) #define ALLOW_FP (0x800ULL) #define REM_PAUSED_F (0x1000ULL) #define RAMDUMP (0x2000ULL) #define REMOTE_PAUSED() (pc->flags2 & REM_PAUSED_F) #define OFFLINE_HIDE (0x4000ULL) #define INCOMPLETE_DUMP (0x8000ULL) #define is_incomplete_dump() (pc->flags2 & INCOMPLETE_DUMP) #define QEMU_MEM_DUMP_COMPRESSED (0x10000ULL) #define SNAP (0x20000ULL) #define EXCLUDED_VMEMMAP (0x40000ULL) #define is_excluded_vmemmap() (pc->flags2 & EXCLUDED_VMEMMAP) char *cleanup; char *namelist_orig; char 
/* remainder of struct program_context (declaration begins earlier in this file) */
*namelist_debug_orig;
FILE *args_ifile;		/* per-command args input file */
void (*cmd_cleanup)(void *);	/* per-command cleanup function */
void *cmd_cleanup_arg;		/* optional cleanup function argument */
ulong scope;			/* optional text context address */
ulong nr_hash_queues;		/* hash queue head count */
char *(*read_vmcoreinfo)(const char *);
};

#define READMEM  pc->readmem

typedef void (*cmd_func_t)(void);

struct command_table_entry {		/* one for each command in menu */
	char *name;
	cmd_func_t func;
	char **help_data;
	ulong flags;
};

/* parse state for a per-command args input file (pc->args_ifile) */
struct args_input_file {
	int index;
	int args_used;
	int is_gdb_cmd;
	int in_expression;
	int start;
	int resume;
	char *fileptr;
};

#define REFRESH_TASK_TABLE (0x1)	/* command_table_entry flags */
#define HIDDEN_COMMAND     (0x2)
#define CLEANUP            (0x4)	/* for extensions only */
#define MINIMAL            (0x8)

/*
 * A linked list of extension table structures keeps track of the current
 * set of shared library extensions.
 */
struct extension_table {
	void *handle;				/* handle from dlopen() */
	char *filename;				/* name of shared library */
	struct command_table_entry *command_table;	/* list of commands */
	ulong flags;				/* registration flags */
	struct extension_table *next, *prev;	/* bookkeeping */
};

#define REGISTERED             (0x1)	/* extension_table flags */
#define DUPLICATE_COMMAND_NAME (0x2)
#define NO_MINIMAL_COMMANDS    (0x4)

/* mirrors the kernel's struct new_utsname (include/linux/utsname.h) */
struct new_utsname {
	char sysname[65];
	char nodename[65];
	char release[65];
	char version[65];
	char machine[65];
	char domainname[65];
};

/* kernel_table .flags bits */
#define NO_MODULE_ACCESS (0x1)
#define TVEC_BASES_V1    (0x2)
#define GCC_3_2          (0x4)
#define GCC_3_2_3        (0x8)
#define GCC_2_96         (0x10)
#define RA_SEEK          (0x20)
#define NO_RA_SEEK       (0x40)
#define KALLSYMS_V1      (0x80)
#define NO_KALLSYMS      (0x100)
#define PER_CPU_OFF      (0x200)
#define SMP              (0x400)
#define GCC_3_3_2        (0x800)
#define KMOD_V1          (0x1000)
#define KMOD_V2          (0x2000)
#define KALLSYMS_V2      (0x2000)	/* NOTE(review): same bit as KMOD_V2 —
					 * appears deliberate upstream; confirm
					 * before allocating this bit anew */
#define TVEC_BASES_V2    (0x4000)
#define GCC_3_3_3        (0x8000)
#define USE_OLD_BT       (0x10000)
#define ARCH_XEN         (0x20000)
#define NO_IKCONFIG      (0x40000)
#define DWARF_UNWIND     (0x80000)
#define NO_DWARF_UNWIND  (0x100000)
#define DWARF_UNWIND_MEMORY   (0x200000)
#define DWARF_UNWIND_EH_FRAME (0x400000)
#define DWARF_UNWIND_CAPABLE  (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME)
#define DWARF_UNWIND_MODULES  (0x800000)
#define BUGVERBOSE_OFF   (0x1000000)
#define RELOC_SET        (0x2000000)
#define RELOC_FORCE      (0x4000000)
#define ARCH_OPENVZ      (0x8000000)
#define ARCH_PVOPS       (0x10000000)
#define PRE_KERNEL_INIT  (0x20000000)
#define ARCH_PVOPS_XEN   (0x40000000)
#define IRQ_DESC_TREE    (0x80000000)
#define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3)

/* flags2 */
#define RELOC_AUTO    (0x1ULL)
#define KASLR         (0x2ULL)
#define KASLR_CHECK   (0x4ULL)
#define GET_TIMESTAMP (0x8ULL)
#define TVEC_BASES_V3 (0x10ULL)

/* quick tests against the global kernel_table flags */
#define XEN()       (kt->flags & ARCH_XEN)
#define OPENVZ()    (kt->flags & ARCH_OPENVZ)
#define PVOPS()     (kt->flags & ARCH_PVOPS)
#define PVOPS_XEN() (kt->flags & ARCH_PVOPS_XEN)

/* Xen machine/pseudo-physical address conversion helpers */
#define XEN_MACHINE_TO_MFN(m)  ((ulonglong)(m) >> PAGESHIFT())
#define XEN_PFN_TO_PSEUDO(p)   ((ulonglong)(p) << PAGESHIFT())
#define XEN_MFN_NOT_FOUND      (~0UL)
#define XEN_PFNS_PER_PAGE      (PAGESIZE()/sizeof(ulong))
#define XEN_FOREIGN_FRAME      (1UL << (BITS()-1))
#define XEN_MACHADDR_NOT_FOUND (~0ULL)
#define XEN_P2M_PER_PAGE       (PAGESIZE() / sizeof(unsigned long))
#define XEN_P2M_MID_PER_PAGE   (PAGESIZE() / sizeof(unsigned long *))
#define XEN_P2M_TOP_PER_PAGE   (PAGESIZE() / sizeof(unsigned long **))

struct kernel_table {			/* kernel data */
	ulong flags;
	ulong stext;
	ulong etext;
	ulong stext_init;
	ulong etext_init;
	ulong init_begin;
	ulong init_end;
	ulong end;
	int cpus;
	char *cpus_override;
	void (*display_bh)(void);
	ulong module_list;
	ulong kernel_module;
	int mods_installed;
	struct timespec date;
	char proc_version[BUFSIZE];
	struct new_utsname utsname;
	uint kernel_version[3];
	uint gcc_version[3];
	int runq_siblings;
	int kernel_NR_CPUS;
	long __per_cpu_offset[NR_CPUS];
	long *__rq_idx;
	long *__cpu_idx;
	ulong *cpu_flags;		/* per-cpu state bits, defined below */
#define POSSIBLE     (0x1)
#define PRESENT      (0x2)
#define ONLINE       (0x4)
#define NMI          (0x8)
#define POSSIBLE_MAP (POSSIBLE)
#define PRESENT_MAP  (PRESENT)
#define ONLINE_MAP   (ONLINE)
#define ACTIVE_MAP   (0x10)
	int BUG_bytes;
	ulong xen_flags;		/* Xen state bits, defined below */
#define WRITABLE_PAGE_TABLES  (0x1)
#define SHADOW_PAGE_TABLES    (0x2)
#define CANONICAL_PAGE_TABLES (0x4)
#define XEN_SUSPEND           (0x8)
	char *m2p_page;
	ulong phys_to_machine_mapping;
	ulong p2m_table_size;
#define P2M_MAPPING_CACHE (512)
	/* cache of recently resolved p2m mapping pages */
	struct p2m_mapping_cache {
		ulong mapping;
		ulong pfn;
		ulong start;
		ulong end;
	} p2m_mapping_cache[P2M_MAPPING_CACHE];
#define P2M_MAPPING_PAGE_PFN(c) \
	(PVOPS_XEN() ? kt->p2m_mapping_cache[c].pfn : \
	(((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \
	* XEN_PFNS_PER_PAGE))
	ulong last_mapping_read;
	ulong p2m_cache_index;
	ulong p2m_pages_searched;	/* p2m cache statistics */
	ulong p2m_mfn_cache_hits;
	ulong p2m_page_cache_hits;
	ulong relocate;
	char *module_tree;
	struct pvops_xen_info {
		int p2m_top_entries;
		ulong p2m_top;
		ulong p2m_mid_missing;
		ulong p2m_missing;
	} pvops_xen;
	int highest_irq;
#define IKCONFIG_AVAIL	0x1	/* kernel contains ikconfig data */
#define IKCONFIG_LOADED	0x2	/* ikconfig data is currently loaded */
	int ikconfig_flags;
	int ikconfig_ents;
	char *hypervisor;
	/* symbol values/sizes/offsets harvested from the VMCOREINFO note */
	struct vmcoreinfo_data {
		ulong log_buf_SYMBOL;
		ulong log_end_SYMBOL;
		ulong log_buf_len_SYMBOL;
		ulong logged_chars_SYMBOL;
		ulong log_first_idx_SYMBOL;
		ulong log_next_idx_SYMBOL;
		long log_SIZE;
		long log_ts_nsec_OFFSET;
		long log_len_OFFSET;
		long log_text_len_OFFSET;
		long log_dict_len_OFFSET;
		ulong phys_base_SYMBOL;
		ulong _stext_SYMBOL;
	} vmcoreinfo;
	ulonglong flags2;
	char *source_tree;
};

/*
 * Aid for the two versions of the kernel's module list linkage.
 */
#define NEXT_MODULE(next_module, modbuf)			      \
{								      \
	switch (kt->flags & (KMOD_V1|KMOD_V2))			      \
	{							      \
	case KMOD_V1:						      \
		next_module = ULONG(modbuf + OFFSET(module_next));    \
		break;						      \
	case KMOD_V2:						      \
		next_module = ULONG(modbuf + OFFSET(module_list));    \
		if (next_module != kt->kernel_module)		      \
			next_module -= OFFSET(module_list);	      \
		break;						      \
	}							      \
}

/* pack (major, minor, patch) into one comparable integer */
#define THIS_KERNEL_VERSION ((kt->kernel_version[0] << 16) + \
			     (kt->kernel_version[1] << 8) + \
			     (kt->kernel_version[2]))
#define LINUX(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z))

#define THIS_GCC_VERSION    ((kt->gcc_version[0] << 16) + \
			     (kt->gcc_version[1] << 8) + \
			     (kt->gcc_version[2]))
#define GCC(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z))

#define IS_KERNEL_STATIC_TEXT(x) (((ulong)(x) >= kt->stext) && \
				  ((ulong)(x) < kt->etext))

#define TASK_COMM_LEN 16     /* task command name length including NULL */

struct task_context {			/* context stored for each task */
	ulong task;
	ulong thread_info;
	ulong pid;
	char comm[TASK_COMM_LEN+1];
	int processor;
	ulong ptask;
	ulong mm_struct;
	struct task_context *tc_next;
};

struct tgid_context {		/* tgid and task stored for each task */
	ulong tgid;
	ulong task;
};

struct task_table {			/* kernel/local task table data */
	struct task_context *current;
	struct task_context *context_array;
	void (*refresh_task_table)(void);
	ulong flags;
	ulong task_start;
	ulong task_end;
	void *task_local;
	int max_tasks;
	int nr_threads;
	ulong running_tasks;
	ulong retries;
	ulong panicmsg;
	int panic_processor;
	ulong *idle_threads;
	ulong *panic_threads;
	ulong *active_set;
	ulong *panic_ksp;
	ulong *hardirq_ctx;
	ulong *hardirq_tasks;
	ulong *softirq_ctx;
	ulong *softirq_tasks;
	ulong panic_task;
	ulong this_task;
	int pidhash_len;
	ulong pidhash_addr;
	ulong last_task_read;		/* single-entry read caches */
	ulong last_thread_info_read;
	ulong last_mm_read;
	char *task_struct;
	char *thread_info;
	char *mm_struct;
	ulong init_pid_ns;
	struct tgid_context *tgid_array;
	struct tgid_context *last_tgid;
	ulong tgid_searches;
	ulong tgid_cache_hits;
	long filepages;
	long anonpages;
};

/* task_table .flags bits */
#define TASK_INIT_DONE       (0x1)
#define TASK_ARRAY_EXISTS    (0x2)
#define PANIC_TASK_NOT_FOUND (0x4)
#define TASK_REFRESH         (0x8)
#define TASK_REFRESH_OFF     (0x10)
#define PANIC_KSP            (0x20)
#define ACTIVE_SET           (0x40)
#define POPULATE_PANIC       (0x80)
#define PIDHASH              (0x100)
#define PID_HASH             (0x200)
#define THREAD_INFO          (0x400)
#define IRQSTACKS            (0x800)
#define TIMESPEC             (0x1000)
#define NO_TIMESPEC          (0x2000)
#define ACTIVE_ONLY          (0x4000)

#define TASK_SLUSH (20)

#define NO_PROC_ID 0xFF       /* No processor magic marker (from kernel) */

/*
 * Global "tt" points to task_table
 */
#define CURRENT_CONTEXT() (tt->current)
#define CURRENT_TASK()    (tt->current->task)
#define CURRENT_PID()     (tt->current->pid)
#define CURRENT_COMM()    (tt->current->comm)
#define RUNNING_TASKS()   (tt->running_tasks)
#define FIRST_CONTEXT()   (tt->context_array)

#define NO_PID  ((ulong)-1)
#define NO_TASK (0)

/* indirect through the per-architecture machdep vector */
#define IS_TASK_ADDR(X)  (machdep->is_task_addr(X))
#define GET_STACKBASE(X) (machdep->get_stackbase(X))
#define GET_STACKTOP(X)  (machdep->get_stacktop(X))
#define STACKSIZE()      (machdep->stacksize)
#define LONGS_PER_STACK  (machdep->stacksize/sizeof(ulong))

#define INSTACK(X,BT) \
	(((ulong)(X) >= (BT)->stackbase) && ((ulong)(X) < (BT)->stacktop))

#define ALIGNED_STACK_OFFSET(task)  ((ulong)(task) & (STACKSIZE()-1))

#define BITS()    (machdep->bits)
#define BITS32()  (machdep->bits == 32)
#define BITS64()  (machdep->bits == 64)
#define IS_KVADDR(X)   (machdep->is_kvaddr(X))
#define IS_UVADDR(X,C) (machdep->is_uvaddr(X,C))

/* kill(pid, 0) probes process existence without sending a signal */
#define PID_ALIVE(x) (kill(x, 0) == 0)

struct kernel_list_head {
	struct kernel_list_head *next, *prev;
};

struct stack_hook {
	ulong esp;
	ulong eip;
};

/* per-invocation backtrace request/state, passed to machdep->back_trace() */
struct bt_info {
	ulong task;
	ulonglong flags;
	ulong instptr;
	ulong stkptr;
	ulong bptr;
	ulong stackbase;
	ulong stacktop;
	char *stackbuf;
	struct task_context *tc;
	struct stack_hook *hp;
	struct stack_hook *textlist;
	struct reference *ref;
	ulong frameptr;
	char *call_target;
	void *machdep;
	ulong debug;
	ulong eframe_ip;
	ulong radix;
	ulong *cpumask;
};

#define STACK_OFFSET_TYPE(OFF) \
	(((ulong)(OFF) > STACKSIZE()) ? \
	(ulong)((ulong)(OFF) - (ulong)(bt->stackbase)) : (ulong)(OFF))

#define GET_STACK_ULONG(OFF) \
	*((ulong *)((char *)(&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(OFF))])))

#define GET_STACK_DATA(OFF, LOC, SZ) memcpy((void *)(LOC), \
	(void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ))

struct machine_specific;  /* uniquely defined below each machine's area */
struct xendump_data;
struct xen_kdump_data;

struct vaddr_range {
	ulong start;
	ulong end;
	ulong type;
#define KVADDR_UNITY_MAP  (1)
#define KVADDR_VMALLOC    (2)
#define KVADDR_VMEMMAP    (3)
#define KVADDR_START_MAP  (4)
#define KVADDR_MODULES    (5)
#define MAX_KVADDR_RANGES KVADDR_MODULES
};

#define MAX_MACHDEP_ARGS 5  /* for --machdep/-m machine-specific args */

/* per-architecture parameters and function hooks (global "machdep") */
struct machdep_table {
	ulong flags;
	ulong kvbase;
	ulong identity_map_base;
	uint pagesize;
	uint pageshift;
	ulonglong pagemask;
	ulong pageoffset;
	ulong stacksize;
	uint hz;
	ulong mhz;
	int bits;
	int nr_irqs;
	uint64_t memsize;
	int (*eframe_search)(struct bt_info *);
	void (*back_trace)(struct bt_info *);
	ulong (*processor_speed)(void);
	int (*uvtop)(struct task_context *, ulong, physaddr_t *, int);
	int (*kvtop)(struct task_context *, ulong, physaddr_t *, int);
	ulong (*get_task_pgd)(ulong);
	void (*dump_irq)(int);
	void (*get_stack_frame)(struct bt_info *, ulong *, ulong *);
	ulong (*get_stackbase)(ulong);
	ulong (*get_stacktop)(ulong);
	int (*translate_pte)(ulong, void *, ulonglong);
	uint64_t (*memory_size)(void);
	ulong (*vmalloc_start)(void);
	int (*is_task_addr)(ulong);
	int (*verify_symbol)(const char *, ulong, char);
	int (*dis_filter)(ulong, char *, unsigned int);
	int (*get_smp_cpus)(void);
	int (*is_kvaddr)(ulong);
	int (*is_uvaddr)(ulong, struct task_context *);
	int (*verify_paddr)(uint64_t);
	void (*cmd_mach)(void);
	void (*init_kernel_pgd)(void);
	struct syment *(*value_to_symbol)(ulong, ulong *);
	struct line_number_hook {
		char *func;
		char **file;
	} *line_number_hooks;
	/* single-entry page table read caches (see FILL_* macros below) */
	ulong last_pgd_read; ulong
last_pud_read;
	ulong last_pmd_read;
	ulong last_ptbl_read;
	char *pgd;
	char *pud;
	char *pmd;
	char *ptbl;
	int ptrs_per_pgd;
	char *cmdline_args[MAX_MACHDEP_ARGS];
	struct machine_specific *machspec;
	ulong section_size_bits;
	ulong max_physmem_bits;
	ulong sections_per_root;
	int (*xendump_p2m_create)(struct xendump_data *);
	ulong (*xendump_panic_task)(struct xendump_data *);
	void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *);
	void (*clear_machdep_cache)(void);
	int (*xen_kdump_p2m_create)(struct xen_kdump_data *);
	int (*in_alternate_stack)(int, ulong);
	void (*dumpfile_init)(int, void *);
	void (*process_elf_notes)(void *, unsigned long);
	int (*get_kvaddr_ranges)(struct vaddr_range *);
	int (*verify_line_number)(ulong, ulong, ulong);
	void (*get_irq_affinity)(int);
	void (*show_interrupts)(int, ulong *);
};

/*
 * Processor-common flags; processor-specific flags use the lower bits
 * as defined in their processor-specific files below. (see KSYMS_START defs).
 */
#define HWRESET         (0x80000000)
#define OMIT_FRAME_PTR  (0x40000000)
#define FRAMESIZE_DEBUG (0x20000000)
#define MACHDEP_BT_TEXT (0x10000000)
#define DEVMEMRD        (0x8000000)
#define INIT            (0x4000000)
#define VM_4_LEVEL      (0x2000000)
#define MCA             (0x1000000)
#define PAE             (0x800000)
#define VMEMMAP         (0x400000)

extern struct machdep_table *machdep;

#ifndef HZ
#define HZ sysconf(_SC_CLK_TCK)
#endif

#define IS_LAST_PGD_READ(pgd)   ((ulong)(pgd) == machdep->last_pgd_read)
#define IS_LAST_PMD_READ(pmd)   ((ulong)(pmd) == machdep->last_pmd_read)
#define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read)
#define IS_LAST_PUD_READ(pud)   ((ulong)(pud) == machdep->last_pud_read)

/*
 * Read a page-table page into the machdep buffer, skipping the read when
 * the single-entry cache already holds that page.
 */
#define FILL_PGD(PGD, TYPE, SIZE) 				            \
	if (!IS_LAST_PGD_READ(PGD)) {                                       \
		readmem((ulonglong)((ulong)(PGD)), TYPE, machdep->pgd,      \
			SIZE, "pgd page", FAULT_ON_ERROR);                  \
		machdep->last_pgd_read = (ulong)(PGD);                      \
	}

#define FILL_PUD(PUD, TYPE, SIZE) 				            \
	if (!IS_LAST_PUD_READ(PUD)) {                                       \
		readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud,      \
			SIZE, "pud page", FAULT_ON_ERROR);                  \
		machdep->last_pud_read = (ulong)(PUD);                      \
	}

#define FILL_PMD(PMD, TYPE, SIZE)			                    \
	if (!IS_LAST_PMD_READ(PMD)) {                                       \
		readmem((ulonglong)(PMD), TYPE, machdep->pmd,               \
			SIZE, "pmd page", FAULT_ON_ERROR);                  \
		machdep->last_pmd_read = (ulong)(PMD);                      \
	}

#define FILL_PTBL(PTBL, TYPE, SIZE)			                    \
	if (!IS_LAST_PTBL_READ(PTBL)) {                                     \
		readmem((ulonglong)(PTBL), TYPE, machdep->ptbl,             \
			SIZE, "page table", FAULT_ON_ERROR);                \
		machdep->last_ptbl_read = (ulong)(PTBL);                    \
	}

/* initialization sequence phases */
#define SETUP_ENV  (0)
#define PRE_SYMTAB (1)
#define PRE_GDB    (2)
#define POST_GDB   (3)
#define POST_INIT  (4)
#define POST_VM    (5)
#define LOG_ONLY   (6)

/* "foreach" command keyword identifiers */
#define FOREACH_BT    (1)
#define FOREACH_VM    (2)
#define FOREACH_TASK  (3)
#define FOREACH_SET   (4)
#define FOREACH_FILES (5)
#define FOREACH_NET   (6)
#define FOREACH_TEST  (7)
#define FOREACH_VTOP  (8)
#define FOREACH_SIG   (9)
#define FOREACH_PS    (10)

#define MAX_FOREACH_KEYWORDS (10)
#define MAX_FOREACH_TASKS    (50)
#define MAX_FOREACH_PIDS     (50)
#define MAX_FOREACH_COMMS    (50)
#define MAX_FOREACH_ARGS     (50)
#define MAX_REGEX_ARGS       (10)

/* foreach_data .flags bits */
#define FOREACH_CMD       (0x1)
#define FOREACH_r_FLAG    (0x2)
#define FOREACH_s_FLAG    (0x4)
#define FOREACH_S_FLAG    (0x8)
#define FOREACH_i_FLAG    (0x10)
#define FOREACH_e_FLAG    (0x20)
#define FOREACH_g_FLAG    (0x40)
#define FOREACH_l_FLAG    (0x80)
#define FOREACH_p_FLAG    (0x100)
#define FOREACH_t_FLAG    (0x200)
#define FOREACH_u_FLAG    (0x400)
#define FOREACH_m_FLAG    (0x800)
#define FOREACH_v_FLAG    (0x1000)
#define FOREACH_KERNEL    (0x2000)
#define FOREACH_USER      (0x4000)
#define FOREACH_SPECIFIED (0x8000)
#define FOREACH_ACTIVE    (0x10000)
#define FOREACH_k_FLAG    (0x20000)
#define FOREACH_c_FLAG    (0x40000)
#define FOREACH_f_FLAG    (0x80000)
#define FOREACH_o_FLAG    (0x100000)
#define FOREACH_T_FLAG    (0x200000)
#define FOREACH_F_FLAG    (0x400000)
#define FOREACH_x_FLAG    (0x800000)
#define FOREACH_d_FLAG    (0x1000000)
#define FOREACH_STATE     (0x2000000)
#define FOREACH_a_FLAG    (0x4000000)
#define FOREACH_G_FLAG    (0x8000000)
#define FOREACH_F_FLAG2   (0x10000000)

/* mutually-exclusive "foreach ps" option flags */
#define FOREACH_PS_EXCLUSIVE \
  (FOREACH_g_FLAG|FOREACH_a_FLAG|FOREACH_t_FLAG|FOREACH_c_FLAG|FOREACH_p_FLAG|FOREACH_l_FLAG|FOREACH_r_FLAG|FOREACH_m_FLAG)

/* parsed arguments/state for one "foreach" invocation */
struct foreach_data {
	ulong flags;
	int keyword_array[MAX_FOREACH_KEYWORDS];
	ulong task_array[MAX_FOREACH_TASKS];
	char *comm_array[MAX_FOREACH_COMMS];
	ulong pid_array[MAX_FOREACH_PIDS];
	ulong arg_array[MAX_FOREACH_ARGS];
	struct regex_info {
		char *pattern;
		regex_t regex;
	} regex_info[MAX_REGEX_ARGS];
	ulong state;
	char *reference;
	int keys;
	int pids;
	int tasks;
	int comms;
	int args;
	int regexs;
};

struct reference {
	char *str;
	ulong cmdflags;
	ulong hexval;
	ulong decval;
	ulong ref1;
	ulong ref2;
	void *refp;
};

struct offset_table {                    /* stash of commonly-used offsets */
	long list_head_next;       /* add new entries to end of table */
	long list_head_prev;
	long task_struct_pid;
	long task_struct_state;
	long task_struct_comm;
	long task_struct_mm;
	long task_struct_tss;
	long task_struct_thread;
	long task_struct_active_mm;
	long task_struct_tss_eip;
	long task_struct_tss_esp;
	long task_struct_tss_ksp;
	long task_struct_processor;
	long task_struct_p_pptr;
	long task_struct_parent;
	long task_struct_has_cpu;
	long task_struct_cpus_runnable;
	long task_struct_thread_eip;
	long task_struct_thread_esp;
	long task_struct_thread_ksp;
	long task_struct_next_task;
	long task_struct_files;
	long task_struct_fs;
	long task_struct_pidhash_next;
	long task_struct_next_run;
	long task_struct_flags;
	long task_struct_sig;
	long task_struct_signal;
	long task_struct_blocked;
	long task_struct_sigpending;
	long task_struct_pending;
	long task_struct_sigqueue;
	long task_struct_sighand;
	long task_struct_start_time;
	long task_struct_times;
	long task_struct_utime;
	long task_struct_stime;
	long task_struct_cpu;
	long task_struct_run_list;
	long task_struct_pgrp;
	long task_struct_tgid;
	long task_struct_namespace;
	long task_struct_pids;
	long task_struct_last_run;
	long task_struct_timestamp;
	long task_struct_thread_info;
	long task_struct_nsproxy;
	long task_struct_rlim;
	long thread_info_task;
long thread_info_cpu; long thread_info_previous_esp; long thread_info_flags; long nsproxy_mnt_ns; long mnt_namespace_root; long mnt_namespace_list; long pid_link_pid; long pid_hash_chain; long hlist_node_next; long hlist_node_pprev; long pid_pid_chain; long thread_struct_eip; long thread_struct_esp; long thread_struct_ksp; long thread_struct_fph; long thread_struct_rip; long thread_struct_rsp; long thread_struct_rsp0; long tms_tms_utime; long tms_tms_stime; long signal_struct_count; long signal_struct_action; long signal_struct_shared_pending; long signal_struct_rlim; long k_sigaction_sa; long sigaction_sa_handler; long sigaction_sa_flags; long sigaction_sa_mask; long sigpending_head; long sigpending_list; long sigpending_signal; long signal_queue_next; long signal_queue_info; long sigqueue_next; long sigqueue_list; long sigqueue_info; long sighand_struct_action; long siginfo_si_signo; long thread_struct_cr3; long thread_struct_ptbr; long thread_struct_pg_tables; long switch_stack_r26; long switch_stack_b0; long switch_stack_ar_bspstore; long switch_stack_ar_pfs; long switch_stack_ar_rnat; long switch_stack_pr; long cpuinfo_ia64_proc_freq; long cpuinfo_ia64_unimpl_va_mask; long cpuinfo_ia64_unimpl_pa_mask; long device_node_type; long device_node_allnext; long device_node_properties; long property_name; long property_value; long property_next; long machdep_calls_setup_residual; long RESIDUAL_VitalProductData; long VPD_ProcessorHz; long bd_info_bi_intfreq; long hwrpb_struct_cycle_freq; long hwrpb_struct_processor_offset; long hwrpb_struct_processor_size; long percpu_struct_halt_PC; long percpu_struct_halt_ra; long percpu_struct_halt_pv; long mm_struct_mmap; long mm_struct_pgd; long mm_struct_rss; long mm_struct_anon_rss; long mm_struct_file_rss; long mm_struct_total_vm; long mm_struct_start_code; long mm_struct_arg_start; long mm_struct_arg_end; long mm_struct_env_start; long mm_struct_env_end; long vm_area_struct_vm_mm; long vm_area_struct_vm_next; long 
vm_area_struct_vm_end; long vm_area_struct_vm_start; long vm_area_struct_vm_flags; long vm_area_struct_vm_file; long vm_area_struct_vm_offset; long vm_area_struct_vm_pgoff; long vm_struct_addr; long vm_struct_size; long vm_struct_next; long module_size_of_struct; long module_next; long module_size; long module_name; long module_nsyms; long module_syms; long module_flags; long module_num_syms; long module_list; long module_gpl_syms; long module_num_gpl_syms; long module_module_core; long module_core_size; long module_core_text_size; long module_num_symtab; long module_symtab; long module_strtab; long module_kallsyms_start; long kallsyms_header_sections; long kallsyms_header_section_off; long kallsyms_header_symbols; long kallsyms_header_symbol_off; long kallsyms_header_string_off; long kallsyms_symbol_section_off; long kallsyms_symbol_symbol_addr; long kallsyms_symbol_name_off; long kallsyms_section_start; long kallsyms_section_size; long kallsyms_section_name_off; long page_next; long page_prev; long page_next_hash; long page_list; long page_list_next; long page_list_prev; long page_inode; long page_offset; long page_count; long page_flags; long page_mapping; long page_index; long page_buffers; long page_lru; long page_pte; long swap_info_struct_swap_file; long swap_info_struct_swap_vfsmnt; long swap_info_struct_flags; long swap_info_struct_swap_map; long swap_info_struct_swap_device; long swap_info_struct_prio; long swap_info_struct_max; long swap_info_struct_pages; long swap_info_struct_old_block_size; long block_device_bd_inode; long block_device_bd_list; long block_device_bd_disk; long irq_desc_t_status; long irq_desc_t_handler; long irq_desc_t_chip; long irq_desc_t_action; long irq_desc_t_depth; long irqdesc_action; long irqdesc_ctl; long irqdesc_level; long irqaction_handler; long irqaction_flags; long irqaction_mask; long irqaction_name; long irqaction_dev_id; long irqaction_next; long hw_interrupt_type_typename; long hw_interrupt_type_startup; long 
hw_interrupt_type_shutdown; long hw_interrupt_type_handle; long hw_interrupt_type_enable; long hw_interrupt_type_disable; long hw_interrupt_type_ack; long hw_interrupt_type_end; long hw_interrupt_type_set_affinity; long irq_chip_typename; long irq_chip_startup; long irq_chip_shutdown; long irq_chip_enable; long irq_chip_disable; long irq_chip_ack; long irq_chip_end; long irq_chip_set_affinity; long irq_chip_mask; long irq_chip_mask_ack; long irq_chip_unmask; long irq_chip_eoi; long irq_chip_retrigger; long irq_chip_set_type; long irq_chip_set_wake; long irq_cpustat_t___softirq_active; long irq_cpustat_t___softirq_mask; long fdtable_max_fds; long fdtable_max_fdset; long fdtable_open_fds; long fdtable_fd; long files_struct_fdt; long files_struct_max_fds; long files_struct_max_fdset; long files_struct_open_fds; long files_struct_fd; long files_struct_open_fds_init; long file_f_dentry; long file_f_vfsmnt; long file_f_count; long file_f_path; long path_mnt; long path_dentry; long fs_struct_root; long fs_struct_pwd; long fs_struct_rootmnt; long fs_struct_pwdmnt; long dentry_d_inode; long dentry_d_parent; long dentry_d_name; long dentry_d_covers; long dentry_d_iname; long qstr_len; long qstr_name; long inode_i_mode; long inode_i_op; long inode_i_sb; long inode_u; long inode_i_flock; long inode_i_fop; long inode_i_mapping; long address_space_nrpages; long vfsmount_mnt_next; long vfsmount_mnt_devname; long vfsmount_mnt_dirname; long vfsmount_mnt_sb; long vfsmount_mnt_list; long vfsmount_mnt_mountpoint; long vfsmount_mnt_parent; long namespace_root; long namespace_list; long super_block_s_dirty; long super_block_s_type; long super_block_s_files; long file_system_type_name; long nlm_file_f_file; long file_lock_fl_owner; long nlm_host_h_exportent; long svc_client_cl_ident; long kmem_cache_s_c_nextp; long kmem_cache_s_c_name; long kmem_cache_s_c_num; long kmem_cache_s_c_org_size; long kmem_cache_s_c_flags; long kmem_cache_s_c_offset; long kmem_cache_s_c_firstp; long 
kmem_cache_s_c_gfporder; long kmem_cache_s_c_magic; long kmem_cache_s_num; long kmem_cache_s_next; long kmem_cache_s_name; long kmem_cache_s_objsize; long kmem_cache_s_flags; long kmem_cache_s_gfporder; long kmem_cache_s_slabs; long kmem_cache_s_slabs_full; long kmem_cache_s_slabs_partial; long kmem_cache_s_slabs_free; long kmem_cache_s_cpudata; long kmem_cache_s_c_align; long kmem_cache_s_colour_off; long cpucache_s_avail; long cpucache_s_limit; long kmem_cache_s_array; long array_cache_avail; long array_cache_limit; long kmem_cache_s_lists; long kmem_list3_slabs_partial; long kmem_list3_slabs_full; long kmem_list3_slabs_free; long kmem_list3_free_objects; long kmem_list3_shared; long kmem_slab_s_s_nextp; long kmem_slab_s_s_freep; long kmem_slab_s_s_inuse; long kmem_slab_s_s_mem; long kmem_slab_s_s_index; long kmem_slab_s_s_offset; long kmem_slab_s_s_magic; long slab_s_list; long slab_s_s_mem; long slab_s_inuse; long slab_s_free; long slab_list; long slab_s_mem; long slab_inuse; long slab_free; long net_device_next; long net_device_name; long net_device_type; long net_device_addr_len; long net_device_ip_ptr; long net_device_dev_list; long net_dev_base_head; long device_next; long device_name; long device_type; long device_ip_ptr; long device_addr_len; long socket_sk; long sock_daddr; long sock_rcv_saddr; long sock_dport; long sock_sport; long sock_num; long sock_type; long sock_family; long sock_common_skc_family; long sock_sk_type; long inet_sock_inet; long inet_opt_daddr; long inet_opt_rcv_saddr; long inet_opt_dport; long inet_opt_sport; long inet_opt_num; long ipv6_pinfo_rcv_saddr; long ipv6_pinfo_daddr; long timer_list_list; long timer_list_next; long timer_list_entry; long timer_list_expires; long timer_list_function; long timer_vec_root_vec; long timer_vec_vec; long tvec_root_s_vec; long tvec_s_vec; long tvec_t_base_s_tv1; long wait_queue_task; long wait_queue_next; long __wait_queue_task; long __wait_queue_head_task_list; long __wait_queue_task_list; long 
pglist_data_node_zones; long pglist_data_node_mem_map; long pglist_data_node_start_paddr; long pglist_data_node_start_mapnr; long pglist_data_node_size; long pglist_data_node_id; long pglist_data_node_next; long pglist_data_nr_zones; long pglist_data_node_start_pfn; long pglist_data_pgdat_next; long pglist_data_node_present_pages; long pglist_data_node_spanned_pages; long pglist_data_bdata; long page_cache_bucket_chain; long zone_struct_free_pages; long zone_struct_free_area; long zone_struct_zone_pgdat; long zone_struct_name; long zone_struct_size; long zone_struct_memsize; long zone_struct_zone_start_pfn; long zone_struct_zone_start_paddr; long zone_struct_zone_start_mapnr; long zone_struct_zone_mem_map; long zone_struct_inactive_clean_pages; long zone_struct_inactive_clean_list; long zone_struct_inactive_dirty_pages; long zone_struct_active_pages; long zone_struct_pages_min; long zone_struct_pages_low; long zone_struct_pages_high; long zone_free_pages; long zone_free_area; long zone_zone_pgdat; long zone_zone_mem_map; long zone_name; long zone_spanned_pages; long zone_zone_start_pfn; long zone_pages_min; long zone_pages_low; long zone_pages_high; long zone_vm_stat; long neighbour_next; long neighbour_primary_key; long neighbour_ha; long neighbour_dev; long neighbour_nud_state; long neigh_table_hash_buckets; long neigh_table_key_len; long in_device_ifa_list; long in_ifaddr_ifa_next; long in_ifaddr_ifa_address; long pci_dev_global_list; long pci_dev_next; long pci_dev_bus; long pci_dev_devfn; long pci_dev_class; long pci_dev_device; long pci_dev_vendor; long pci_bus_number; long resource_entry_t_from; long resource_entry_t_num; long resource_entry_t_name; long resource_entry_t_next; long resource_name; long resource_start; long resource_end; long resource_sibling; long resource_child; long runqueue_curr; long runqueue_idle; long runqueue_active; long runqueue_expired; long runqueue_arrays; long runqueue_cpu; long cpu_s_idle; long cpu_s_curr; long 
prio_array_nr_active; long prio_array_queue; long user_regs_struct_ebp; long user_regs_struct_esp; long user_regs_struct_rip; long user_regs_struct_cs; long user_regs_struct_eflags; long user_regs_struct_rsp; long user_regs_struct_ss; long e820map_nr_map; long e820entry_addr; long e820entry_size; long e820entry_type; long char_device_struct_next; long char_device_struct_name; long char_device_struct_fops; long char_device_struct_major; long gendisk_major; long gendisk_disk_name; long gendisk_fops; long blk_major_name_next; long blk_major_name_major; long blk_major_name_name; long radix_tree_root_height; long radix_tree_root_rnode; long x8664_pda_pcurrent; long x8664_pda_data_offset; long x8664_pda_kernelstack; long x8664_pda_irqrsp; long x8664_pda_irqstackptr; long x8664_pda_level4_pgt; long x8664_pda_cpunumber; long x8664_pda_me; long tss_struct_ist; long mem_section_section_mem_map; long vcpu_guest_context_user_regs; long cpu_user_regs_eip; long cpu_user_regs_esp; long cpu_user_regs_rip; long cpu_user_regs_rsp; long unwind_table_core; long unwind_table_init; long unwind_table_address; long unwind_table_size; long unwind_table_link; long unwind_table_name; long rq_cfs; long rq_rt; long rq_nr_running; long cfs_rq_rb_leftmost; long cfs_rq_nr_running; long cfs_rq_tasks_timeline; long task_struct_se; long sched_entity_run_node; long rt_rq_active; long kmem_cache_size; long kmem_cache_objsize; long kmem_cache_offset; long kmem_cache_order; long kmem_cache_local_node; long kmem_cache_objects; long kmem_cache_inuse; long kmem_cache_align; long kmem_cache_name; long kmem_cache_list; long kmem_cache_node; long kmem_cache_cpu_slab; long page_inuse; /* long page_offset; use "old" page->offset */ long page_slab; long page_first_page; long page_freelist; long kmem_cache_node_nr_partial; long kmem_cache_node_nr_slabs; long kmem_cache_node_partial; long kmem_cache_node_full; long pid_numbers; long upid_nr; long upid_ns; long upid_pid_chain; long pid_tasks; long 
kmem_cache_cpu_freelist; long kmem_cache_cpu_page; long kmem_cache_cpu_node; long kmem_cache_flags; long zone_nr_active; long zone_nr_inactive; long zone_all_unreclaimable; long zone_present_pages; long zone_flags; long zone_pages_scanned; long pcpu_info_vcpu; long pcpu_info_idle; long vcpu_struct_rq; long task_struct_sched_info; long sched_info_last_arrival; long page_objects; long kmem_cache_oo; long char_device_struct_cdev; long char_device_struct_baseminor; long cdev_ops; long probe_next; long probe_dev; long probe_data; long kobj_map_probes; long task_struct_prio; long zone_watermark; long module_sect_attrs; long module_sect_attrs_attrs; long module_sect_attrs_nsections; long module_sect_attr_mattr; long module_sect_attr_name; long module_sect_attr_address; long module_attribute_attr; long attribute_owner; long module_sect_attr_attr; long module_sections_attrs; long swap_info_struct_inuse_pages; long s390_lowcore_psw_save_area; long mm_struct_rss_stat; long mm_rss_stat_count; long module_module_init; long module_init_text_size; long cpu_context_save_fp; long cpu_context_save_sp; long cpu_context_save_pc; long elf_prstatus_pr_pid; long elf_prstatus_pr_reg; long irq_desc_t_name; long thread_info_cpu_context; long unwind_table_list; long unwind_table_start; long unwind_table_stop; long unwind_table_begin_addr; long unwind_table_end_addr; long unwind_idx_addr; long unwind_idx_insn; long signal_struct_nr_threads; long module_init_size; long module_percpu; long radix_tree_node_slots; long s390_stack_frame_back_chain; long s390_stack_frame_r14; long user_regs_struct_eip; long user_regs_struct_rax; long user_regs_struct_eax; long user_regs_struct_rbx; long user_regs_struct_ebx; long user_regs_struct_rcx; long user_regs_struct_ecx; long user_regs_struct_rdx; long user_regs_struct_edx; long user_regs_struct_rsi; long user_regs_struct_esi; long user_regs_struct_rdi; long user_regs_struct_edi; long user_regs_struct_ds; long user_regs_struct_es; long user_regs_struct_fs; 
long user_regs_struct_gs; long user_regs_struct_rbp; long user_regs_struct_r8; long user_regs_struct_r9; long user_regs_struct_r10; long user_regs_struct_r11; long user_regs_struct_r12; long user_regs_struct_r13; long user_regs_struct_r14; long user_regs_struct_r15; long sched_entity_cfs_rq; long sched_entity_my_q; long sched_entity_on_rq; long task_struct_on_rq; long cfs_rq_curr; long irq_desc_t_irq_data; long irq_desc_t_kstat_irqs; long irq_desc_t_affinity; long irq_data_chip; long irq_data_affinity; long kernel_stat_irqs; long socket_alloc_vfs_inode; long class_devices; long class_p; long class_private_devices; long device_knode_class; long device_node; long gendisk_dev; long gendisk_kobj; long gendisk_part0; long gendisk_queue; long hd_struct_dev; long klist_k_list; long klist_node_n_klist; long klist_node_n_node; long kobject_entry; long kset_list; long request_list_count; long request_queue_in_flight; long request_queue_rq; long subsys_private_klist_devices; long subsystem_kset; long mount_mnt_parent; long mount_mnt_mountpoint; long mount_mnt_list; long mount_mnt_devname; long mount_mnt; long task_struct_exit_state; long timekeeper_xtime; long file_f_op; long file_private_data; long hstate_order; long hugetlbfs_sb_info_hstate; long idr_layer_ary; long idr_layer_layer; long idr_layers; long idr_top; long ipc_id_ary_p; long ipc_ids_entries; long ipc_ids_max_id; long ipc_ids_ipcs_idr; long ipc_ids_in_use; long ipc_namespace_ids; long kern_ipc_perm_deleted; long kern_ipc_perm_key; long kern_ipc_perm_mode; long kern_ipc_perm_uid; long kern_ipc_perm_id; long kern_ipc_perm_seq; long nsproxy_ipc_ns; long shmem_inode_info_swapped; long shmem_inode_info_vfs_inode; long shm_file_data_file; long shmid_kernel_shm_file; long shmid_kernel_shm_nattch; long shmid_kernel_shm_perm; long shmid_kernel_shm_segsz; long shmid_kernel_id; long sem_array_sem_perm; long sem_array_sem_id; long sem_array_sem_nsems; long msg_queue_q_perm; long msg_queue_q_id; long msg_queue_q_cbytes; long 
msg_queue_q_qnum; long super_block_s_fs_info; long rq_timestamp; long radix_tree_node_height; long rb_root_rb_node; long rb_node_rb_left; long rb_node_rb_right; long rt_prio_array_queue; long task_struct_rt; long sched_rt_entity_run_list; long log_ts_nsec; long log_len; long log_text_len; long log_dict_len; long log_level; long log_flags_level; long timekeeper_xtime_sec; long neigh_table_hash_mask; long sched_rt_entity_my_q; long neigh_table_hash_shift; long neigh_table_nht_ptr; long task_group_parent; long task_group_css; long cgroup_subsys_state_cgroup; long cgroup_dentry; long task_group_rt_rq; long rt_rq_tg; long task_group_cfs_rq; long cfs_rq_tg; long task_group_siblings; long task_group_children; long task_group_cfs_bandwidth; long cfs_rq_throttled; long task_group_rt_bandwidth; long rt_rq_rt_throttled; long rt_rq_highest_prio; long rt_rq_rt_nr_running; long vmap_area_va_start; long vmap_area_va_end; long vmap_area_list; long vmap_area_flags; long vmap_area_vm; long hrtimer_cpu_base_clock_base; long hrtimer_clock_base_offset; long hrtimer_clock_base_active; long hrtimer_clock_base_first; long hrtimer_clock_base_get_time; long hrtimer_base_first; long hrtimer_base_pending; long hrtimer_base_get_time; long hrtimer_node; long hrtimer_list; long hrtimer_softexpires; long hrtimer_expires; long hrtimer_function; long timerqueue_head_next; long timerqueue_node_expires; long timerqueue_node_node; long ktime_t_tv64; long ktime_t_sec; long ktime_t_nsec; long module_taints; long module_gpgsig_ok; long module_license_gplok; long tnt_bit; long tnt_true; long tnt_false; long task_struct_thread_context_fp; long task_struct_thread_context_sp; long task_struct_thread_context_pc; long page_slab_page; long trace_print_flags_mask; long trace_print_flags_name; long task_struct_rss_stat; long task_rss_stat_count; long page_s_mem; long page_active; long hstate_nr_huge_pages; long hstate_free_huge_pages; long hstate_name; long cgroup_kn; long kernfs_node_name; long 
kernfs_node_parent; long kmem_cache_cpu_partial; long kmem_cache_cpu_cache; long nsproxy_net_ns; long atomic_t_counter; long percpu_counter_count; long mm_struct_mm_count; long task_struct_thread_reg29; long task_struct_thread_reg31; long pt_regs_regs; long pt_regs_cp0_badvaddr; long address_space_page_tree; long page_compound_head; }; struct size_table { /* stash of commonly-used sizes */ long page; long free_area_struct; long zone_struct; long free_area; long zone; long kmem_slab_s; long kmem_cache_s; long kmem_bufctl_t; long slab_s; long slab; long cpucache_s; long array_cache; long swap_info_struct; long mm_struct; long vm_area_struct; long pglist_data; long page_cache_bucket; long pt_regs; long task_struct; long thread_info; long softirq_state; long desc_struct; long umode_t; long dentry; long files_struct; long fdtable; long fs_struct; long file; long inode; long vfsmount; long super_block; long irqdesc; long module; long list_head; long hlist_node; long hlist_head; long irq_cpustat_t; long cpuinfo_x86; long cpuinfo_ia64; long timer_list; long timer_vec_root; long timer_vec; long tvec_root_s; long tvec_s; long tvec_t_base_s; long wait_queue; long __wait_queue; long device; long net_device; long sock; long signal_struct; long sigpending_signal; long signal_queue; long sighand_struct; long sigqueue; long k_sigaction; long resource_entry_t; long resource; long runqueue; long irq_desc_t; long task_union; long thread_union; long prio_array; long user_regs_struct; long switch_stack; long vm_area_struct_vm_flags; long e820map; long e820entry; long cpu_s; long pgd_t; long kallsyms_header; long kallsyms_symbol; long kallsyms_section; long irq_ctx; long block_device; long blk_major_name; long gendisk; long address_space; long char_device_struct; long inet_sock; long in6_addr; long socket; long spinlock_t; long radix_tree_root; long radix_tree_node; long x8664_pda; long ppc64_paca; long gate_struct; long tss_struct; long task_struct_start_time; long cputime_t; long 
mem_section; long pid_link; long unwind_table; long rlimit; long kmem_cache; long kmem_cache_node; long upid; long kmem_cache_cpu; long cfs_rq; long pcpu_info; long vcpu_struct; long cdev; long probe; long kobj_map; long page_flags; long module_sect_attr; long task_struct_utime; long task_struct_stime; long cpu_context_save; long elf_prstatus; long note_buf; long unwind_idx; long softirq_action; long irq_data; long s390_stack_frame; long percpu_data; long sched_entity; long kernel_stat; long subsystem; long class_private; long rq_in_flight; long class_private_devices; long mount; long hstate; long ipc_ids; long shmid_kernel; long sem_array; long msg_queue; long log; long log_level; long rt_rq; long task_group; long vmap_area; long hrtimer_clock_base; long hrtimer_base; long tnt; long trace_print_flags; }; struct array_table { int kmem_cache_s_name; int kmem_cache_s_c_name; int kmem_cache_s_array; int kmem_cache_s_cpudata; int irq_desc; int irq_action; int log_buf; int timer_vec_vec; int timer_vec_root_vec; int tvec_s_vec; int tvec_root_s_vec; int page_hash_table; int net_device_name; int neigh_table_hash_buckets; int neighbour_ha; int swap_info; int pglist_data_node_zones; int zone_struct_free_area; int zone_free_area; int free_area; int free_area_DIMENSION; int prio_array_queue; int height_to_maxindex; int pid_hash; int kmem_cache_node; int kmem_cache_cpu_slab; int rt_prio_array_queue; }; /* * The following set of macros use gdb to determine structure, union, * or member sizes/offsets. They should be used only during initialization * of the offset_table or size_table, or with data structures whose names * or members are only known/specified during runtime. 
*/
/*
 * Sentinel "member" pointers passed as the third argument of
 * datatype_info() to request a particular kind of lookup instead of a
 * plain member-offset query.
 */
#define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1))
#define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2))
#define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3))
#define STRUCT_SIZE_REQUEST ((struct datatype_member *)(-4))
/* gdb-backed queries for structure/union sizes */
#define STRUCT_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST)
#define UNION_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST)
#define STRUCT_EXISTS(X) (datatype_info((X), NULL, STRUCT_SIZE_REQUEST) >= 0)
#define DATATYPE_SIZE(X) datatype_info((X)->name, NULL, (X))
/* gdb-backed queries for member offsets, sizes and type codes */
#define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL)
#define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0)
#define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST)
#define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST)
#define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST)
/*
 * The following set of macros can only be used with pre-initialized fields
 * in the offset table, size table or array_table.
*/
/*
 * Checked accessors: OFFSET()/SIZE() hand the caller's location
 * (__FUNCTION__/__FILE__/__LINE__) and the field name to the
 * *_verify() helpers along with the stashed table value.
 */
#define OFFSET(X) (OFFSET_verify(offset_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X))
#define SIZE(X) (SIZE_verify(size_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X))
/* Table fields are negative (-1) until initialized from debuginfo. */
#define INVALID_OFFSET (-1)
#define INVALID_MEMBER(X) (offset_table.X == INVALID_OFFSET)
#define INVALID_SIZE(X) (size_table.X == -1)
#define VALID_SIZE(X) (size_table.X >= 0)
#define VALID_STRUCT(X) (size_table.X >= 0)
#define VALID_MEMBER(X) (offset_table.X >= 0)
/* Unchecked, direct table access */
#define ARRAY_LENGTH(X) (array_table.X)
#define ASSIGN_OFFSET(X) (offset_table.X)
#define ASSIGN_SIZE(X) (size_table.X)
/* Two-candidate variants: pass both fields to the *_option() helpers. */
#define OFFSET_OPTION(X,Y) (OFFSET_option(offset_table.X, offset_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y))
#define SIZE_OPTION(X,Y) (SIZE_option(size_table.X, size_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y))
/* Initializers: look the value up via gdb and stash it in the table. */
#define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z))
#define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y))
#define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E))
#define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E))
#define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z))
#define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z))
/*
 * For use with non-debug kernels.
 */
struct builtin_debug_table {
	char *release;			/* NOTE(review): presumably the kernel release
					 * this prebuilt table applies to — confirm
					 * against the table's users */
	char *machine_type;
	struct offset_table *offset_table;
	struct size_table *size_table;
	struct array_table *array_table;
};
/*
 * Facilitators for pulling correctly-sized data out of a buffer at a
 * known address.
*/ #define INT(ADDR) *((int *)((char *)(ADDR))) #define UINT(ADDR) *((uint *)((char *)(ADDR))) #define LONG(ADDR) *((long *)((char *)(ADDR))) #define ULONG(ADDR) *((ulong *)((char *)(ADDR))) #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) #define SHORT(ADDR) *((short *)((char *)(ADDR))) #define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) struct node_table { int node_id; ulong pgdat; ulong mem_map; ulong size; ulong present; ulonglong start_paddr; ulong start_mapnr; }; struct meminfo; struct slab_data; #define VMA_CACHE (20) struct vm_table { /* kernel VM-related data */ ulong flags; ulong kernel_pgd[NR_CPUS]; ulong high_memory; ulong vmalloc_start; ulong mem_map; long total_pages; ulong totalram_pages; ulong totalhigh_pages; ulong num_physpages; ulong max_mapnr; ulong kmem_max_c_num; ulong kmem_max_limit; ulong kmem_max_cpus; ulong kmem_cache_count; ulong kmem_cache_len_nodes; ulong PG_reserved; ulong PG_slab; ulong PG_head_tail_mask; int kmem_cache_namelen; ulong page_hash_table; int page_hash_table_len; int paddr_prlen; int numnodes; int nr_zones; int nr_free_areas; struct node_table *node_table; void (*dump_free_pages)(struct meminfo *); void (*dump_kmem_cache)(struct meminfo *); struct slab_data *slab_data; uint nr_swapfiles; ulong last_swap_read; char *swap_info_struct; char *vma_cache; ulong cached_vma[VMA_CACHE]; ulong cached_vma_hits[VMA_CACHE]; int vma_cache_index; ulong vma_cache_fills; void *mem_sec; char *mem_section; int ZONE_HIGHMEM; ulong *node_online_map; int node_online_map_len; int nr_vm_stat_items; char **vm_stat_items; int cpu_slab_type; int nr_vm_event_items; char **vm_event_items; int nr_bad_slab_caches; ulong *bad_slab_caches; int nr_pageflags; struct pageflags_data { ulong mask; char *name; } *pageflags_data; }; #define NODES (0x1) #define ZONES (0x2) #define 
PERCPU_KMALLOC_V1 (0x4) #define COMMON_VADDR (0x8) #define KMEM_CACHE_INIT (0x10) #define V_MEM_MAP (0x20) #define PERCPU_KMALLOC_V2 (0x40) #define KMEM_CACHE_UNAVAIL (0x80) #define FLATMEM (0x100) #define DISCONTIGMEM (0x200) #define SPARSEMEM (0x400) #define SPARSEMEM_EX (0x800) #define PERCPU_KMALLOC_V2_NODES (0x1000) #define KMEM_CACHE_DELAY (0x2000) #define NODES_ONLINE (0x4000) #define VM_STAT (0x8000) #define KMALLOC_SLUB (0x10000) #define CONFIG_NUMA (0x20000) #define VM_EVENT (0x40000) #define PGCNT_ADJ (0x80000) #define VM_INIT (0x100000) #define SWAPINFO_V1 (0x200000) #define SWAPINFO_V2 (0x400000) #define NODELISTS_IS_PTR (0x800000) #define KMALLOC_COMMON (0x1000000) #define USE_VMAP_AREA (0x2000000) #define PAGEFLAGS (0x4000000) #define SLAB_OVERLOAD_PAGE (0x8000000) #define SLAB_CPU_CACHE (0x10000000) #define IS_FLATMEM() (vt->flags & FLATMEM) #define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) #define IS_SPARSEMEM() (vt->flags & SPARSEMEM) #define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) #define PADDR_PRLEN (vt->paddr_prlen) struct datatype_member { /* minimal definition of a structure/union */ char *name; /* and possibly a member within it */ char *member; ulong type; long size; long member_offset; long member_size; int member_typecode; ulong flags; char *tagname; /* tagname and value for enums */ long value; ulong vaddr; }; #define union_name struct_name struct list_data { /* generic structure used by do_list() to walk */ ulong flags; /* through linked lists in the kernel */ ulong start; long member_offset; long list_head_offset; ulong end; ulong searchfor; char **structname; int structname_args; char *header; ulong *list_ptr; int (*callback_func)(void *, void *); void *callback_data; long struct_list_offset; }; #define LIST_OFFSET_ENTERED (VERBOSE << 1) #define LIST_START_ENTERED (VERBOSE << 2) #define LIST_HEAD_FORMAT (VERBOSE << 3) #define LIST_HEAD_POINTER (VERBOSE << 4) #define 
RETURN_ON_DUPLICATE (VERBOSE << 5)
#define RETURN_ON_LIST_ERROR (VERBOSE << 6)
#define LIST_STRUCT_RADIX_10 (VERBOSE << 7)
#define LIST_STRUCT_RADIX_16 (VERBOSE << 8)
#define LIST_HEAD_REVERSE (VERBOSE << 9)
#define LIST_ALLOCATE (VERBOSE << 10)
#define LIST_CALLBACK (VERBOSE << 11)
#define CALLBACK_RETURN (VERBOSE << 12)
/* NOTE(review): appears to be the tree-walk counterpart of struct
 * list_data above — confirm against the tree-walking callers. */
struct tree_data {
	ulong flags;			/* TREE_* flags below */
	ulong start;
	long node_member_offset;
	char **structname;
	int structname_args;
	int count;
};
/* flags for tree_data.flags */
#define TREE_ROOT_OFFSET_ENTERED (VERBOSE << 1)
#define TREE_NODE_OFFSET_ENTERED (VERBOSE << 2)
#define TREE_NODE_POINTER (VERBOSE << 3)
#define TREE_POSITION_DISPLAY (VERBOSE << 4)
#define TREE_STRUCT_RADIX_10 (VERBOSE << 5)
#define TREE_STRUCT_RADIX_16 (VERBOSE << 6)
/* values for alias_data.origin */
#define ALIAS_RUNTIME (1)
#define ALIAS_RCLOCAL (2)
#define ALIAS_RCHOME (3)
#define ALIAS_BUILTIN (4)
struct alias_data {			/* command alias storage */
	struct alias_data *next;
	char *alias;
	int argcnt;
	int size;
	int origin;			/* one of the ALIAS_* values above */
	char *args[MAXARGS];
	char argbuf[1];			/* struct-hack tail — presumably allocated
					 * oversized; confirm at the alloc site */
};
struct rb_node {
	unsigned long rb_parent_color;
#define RB_RED 0
#define RB_BLACK 1
	struct rb_node *rb_right;
	struct rb_node *rb_left;
};
struct rb_root {
	struct rb_node *rb_node;
};
#define NUMBER_STACKFRAMES 4
/*
 * Capture up to NUMBER_STACKFRAMES return addresses of the running
 * process via backtrace(3), zero-filling any unused slots.
 */
#define SAVE_RETURN_ADDRESS(retaddr) \
{ \
	int i; \
	int saved_stacks; \
 \
	saved_stacks = backtrace((void **)retaddr, NUMBER_STACKFRAMES); \
 \
	/* explicitly zero out the invalid addresses */ \
	for (i = saved_stacks; i < NUMBER_STACKFRAMES; i++) \
		retaddr[i] = 0; \
}
#endif /* !GDB_COMMON */
/* syment.flags bits */
#define SYMBOL_NAME_USED (0x1)
#define MODULE_SYMBOL (0x2)
#define IS_MODULE_SYMBOL(SYM) ((SYM)->flags & MODULE_SYMBOL)
struct syment {
	ulong value;
	char *name;
	struct syment *val_hash_next;	/* chain in the value hash */
	struct syment *name_hash_next;	/* chain in the name hash */
	char type;
	unsigned char cnt;
	unsigned char flags;		/* SYMBOL_NAME_USED, MODULE_SYMBOL */
	unsigned char pad2;
};
#define NAMESPACE_INIT (1)
#define NAMESPACE_REUSE (2)
#define NAMESPACE_FREE (3)
#define NAMESPACE_INSTALL (4)
#define NAMESPACE_COMPLETE (5)
struct symbol_namespace {
	char *address;
	size_t size;
	long index;
	long cnt;
}; struct downsized { char *name; struct downsized *next; }; #define SYMVAL_HASH (512) #define SYMVAL_HASH_INDEX(vaddr) \ (((vaddr) >> machdep->pageshift) % SYMVAL_HASH) #define SYMNAME_HASH (512) #define SYMNAME_HASH_INDEX(name) \ ((name[0] ^ (name[strlen(name)-1] * name[strlen(name)/2])) % SYMNAME_HASH) #define PATCH_KERNEL_SYMBOLS_START ((char *)(1)) #define PATCH_KERNEL_SYMBOLS_STOP ((char *)(2)) #ifndef GDB_COMMON struct symbol_table_data { ulong flags; #ifdef GDB_5_3 struct _bfd *bfd; #else struct bfd *bfd; #endif struct sec *sections; struct syment *symtable; struct syment *symend; long symcnt; ulong syment_size; struct symval_hash_chain { struct syment *val_hash_head; struct syment *val_hash_last; } symval_hash[SYMVAL_HASH]; double val_hash_searches; double val_hash_iterations; struct syment *symname_hash[SYMNAME_HASH]; struct symbol_namespace kernel_namespace; struct syment *ext_module_symtable; struct syment *ext_module_symend; long ext_module_symcnt; struct symbol_namespace ext_module_namespace; int mods_installed; struct load_module *current; struct load_module *load_modules; off_t dwarf_eh_frame_file_offset; ulong dwarf_eh_frame_size; ulong first_ksymbol; ulong __per_cpu_start; ulong __per_cpu_end; off_t dwarf_debug_frame_file_offset; ulong dwarf_debug_frame_size; ulong first_section_start; ulong last_section_end; ulong _stext_vmlinux; struct downsized downsized; }; /* flags for st */ #define KERNEL_SYMS (0x1) #define MODULE_SYMS (0x2) #define LOAD_MODULE_SYMS (0x4) #define INSMOD_BUILTIN (0x8) #define GDB_SYMS_PATCHED (0x10) #define GDB_PATCHED() (st->flags & GDB_SYMS_PATCHED) #define NO_SEC_LOAD (0x20) #define NO_SEC_CONTENTS (0x40) #define FORCE_DEBUGINFO (0x80) #define CRC_MATCHES (0x100) #define ADD_SYMBOL_FILE (0x200) #define USE_OLD_ADD_SYM (0x400) #define PERCPU_SYMS (0x800) #define MODSECT_UNKNOWN (0x1000) #define MODSECT_V1 (0x2000) #define MODSECT_V2 (0x4000) #define MODSECT_V3 (0x8000) #define MODSECT_VMASK 
(MODSECT_V1|MODSECT_V2|MODSECT_V3) #define NO_STRIP (0x10000) #define NO_LINE_NUMBERS() ((st->flags & GDB_SYMS_PATCHED) && !(kt->flags2 & KASLR)) #endif /* !GDB_COMMON */ #define ALL_MODULES (0) #define MAX_MOD_NAMELIST (256) #define MAX_MOD_NAME (64) #define MAX_MOD_SEC_NAME (64) #define MOD_EXT_SYMS (0x1) #define MOD_LOAD_SYMS (0x2) #define MOD_REMOTE (0x4) #define MOD_KALLSYMS (0x8) #define MOD_INITRD (0x10) #define MOD_NOPATCH (0x20) #define MOD_INIT (0x40) #define MOD_DO_READNOW (0x80) #define SEC_FOUND (0x10000) struct mod_section_data { #if defined(GDB_5_3) || defined(GDB_6_0) struct sec *section; #else struct bfd_section *section; #endif char name[MAX_MOD_SEC_NAME]; ulong offset; ulong size; int priority; int flags; }; struct load_module { ulong mod_base; ulong module_struct; long mod_size; char mod_namelist[MAX_MOD_NAMELIST]; char mod_name[MAX_MOD_NAME]; ulong mod_flags; struct syment *mod_symtable; struct syment *mod_symend; long mod_ext_symcnt; struct syment *mod_ext_symtable; struct syment *mod_ext_symend; long mod_load_symcnt; struct syment *mod_load_symtable; struct syment *mod_load_symend; long mod_symalloc; struct symbol_namespace mod_load_namespace; ulong mod_size_of_struct; ulong mod_text_start; ulong mod_etext_guess; ulong mod_rodata_start; ulong mod_data_start; ulong mod_bss_start; int mod_sections; struct mod_section_data *mod_section_data; ulong mod_init_text_size; ulong mod_init_module_ptr; ulong mod_init_size; struct syment *mod_init_symtable; struct syment *mod_init_symend; ulong mod_percpu; ulong mod_percpu_size; }; #define IN_MODULE(A,L) \ (((ulong)(A) >= (L)->mod_base) && ((ulong)(A) < ((L)->mod_base+(L)->mod_size))) #define IN_MODULE_INIT(A,L) \ (((ulong)(A) >= (L)->mod_init_module_ptr) && ((ulong)(A) < ((L)->mod_init_module_ptr+(L)->mod_init_size))) #define IN_MODULE_PERCPU(A,L) \ (((ulong)(A) >= (L)->mod_percpu) && ((ulong)(A) < ((L)->mod_percpu+(L)->mod_percpu_size))) #define MODULE_PERCPU_SYMS_LOADED(L) ((L)->mod_percpu && 
(L)->mod_percpu_size) #ifndef GDB_COMMON #define KVADDR (0x1) #define UVADDR (0x2) #define PHYSADDR (0x4) #define XENMACHADDR (0x8) #define FILEADDR (0x10) #define AMBIGUOUS (~0) #define USE_USER_PGD (UVADDR << 2) #define VERIFY_ADDR (0x8) /* vm_area_dump() flags -- must follow */ #define PRINT_INODES (0x10) /* KVADDR, UVADDR, and PHYSADDR */ #define PRINT_MM_STRUCT (0x20) #define PRINT_VMA_STRUCTS (0x40) #define PRINT_SINGLE_VMA (0x80) #define PRINT_RADIX_10 (0x100) #define PRINT_RADIX_16 (0x200) #define PRINT_NRPAGES (0x400) #define MIN_PAGE_SIZE (4096) #define PTOB(X) ((ulonglong)(X) << machdep->pageshift) #define BTOP(X) ((ulonglong)(X) >> machdep->pageshift) #define PAGESIZE() (machdep->pagesize) #define PAGESHIFT() (machdep->pageshift) #define PAGEOFFSET(X) (((ulong)(X)) & machdep->pageoffset) #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) /* * Sparse memory stuff * These must follow the definitions in the kernel mmzone.h */ #define SECTION_SIZE_BITS() (machdep->section_size_bits) #define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) #define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) #define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) #define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) #define NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) #define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) #define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) #define SECTIONS_PER_ROOT() (machdep->sections_per_root) /* CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) /* !CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT() (1) #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define NR_SECTION_ROOTS() 
(DIV_ROUND_UP(NR_MEM_SECTIONS(), SECTIONS_PER_ROOT())) #define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) /* * Machine specific stuff */ #ifdef ARM #define _32BIT_ #define MACHINE_TYPE "ARM" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) \ ((unsigned long)(X)-(machdep->machspec->phys_base)+(machdep->kvbase)) #define VTOP(X) \ ((unsigned long)(X)-(machdep->kvbase)+(machdep->machspec->phys_base)) #define IS_VMALLOC_ADDR(X) arm_is_vmalloc_addr((ulong)(X)) #define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define PGDIR_SHIFT (21) #define PTRS_PER_PTE (512) #define PTRS_PER_PGD (2048) #define PGD_OFFSET(vaddr) ((vaddr) >> PGDIR_SHIFT) #define PTE_OFFSET(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS 32 /*add for LPAE*/ typedef unsigned long long u64; typedef signed int s32; typedef u64 pgd_t; typedef u64 pmd_t; typedef u64 pte_t; #define PMDSIZE() (PAGESIZE()) #define LPAE_PGDIR_SHIFT (30) #define LPAE_PMDIR_SHIFT (21) #define LPAE_PGD_OFFSET(vaddr) ((vaddr) >> LPAE_PGDIR_SHIFT) #define LPAE_PMD_OFFSET(vaddr) (((vaddr) >> LPAE_PMDIR_SHIFT) & \ ((1<<(LPAE_PGDIR_SHIFT-LPAE_PMDIR_SHIFT))-1)) #define _SECTION_SIZE_BITS_LPAE 28 #define _MAX_PHYSMEM_BITS_LPAE 36 /* * #define PTRS_PER_PTE 512 * #define PTRS_PER_PMD 512 * #define 
PTRS_PER_PGD 4 * */ #define LPAE_PGDIR_SIZE() 32 #define LPAE_PGDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PGDIR_SIZE() - 1)) #define LPAE_PMDIR_SIZE() 4096 #define LPAE_PMDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PMDIR_SIZE() - 1)) #define LPAE_PTEDIR_SIZE() 4096 #define LPAE_PTEDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PTEDIR_SIZE() - 1)) /*section size for LPAE is 2MiB*/ #define LPAE_SECTION_PAGE_MASK (~((MEGABYTES(2))-1)) #define _PHYSICAL_MASK_LPAE ((1ULL << _MAX_PHYSMEM_BITS_LPAE) - 1) #define PAGE_BASE_MASK ((u64)((s32)machdep->pagemask & _PHYSICAL_MASK_LPAE)) #define LPAE_PAGEBASE(X) (((ulonglong)(X)) & PAGE_BASE_MASK) #define LPAE_VTOP(X) \ ((unsigned long long)(unsigned long)(X) - \ (machdep->kvbase) + (machdep->machspec->phys_base)) #define IS_LAST_PGD_READ_LPAE(pgd) ((pgd) == \ machdep->machspec->last_pgd_read_lpae) #define IS_LAST_PMD_READ_LPAE(pmd) ((pmd) == \ machdep->machspec->last_pmd_read_lpae) #define IS_LAST_PTBL_READ_LPAE(ptbl) ((ptbl) == \ machdep->machspec->last_ptbl_read_lpae) #define FILL_PGD_LPAE(PGD, TYPE, SIZE) \ if (!IS_LAST_PGD_READ_LPAE(PGD)) { \ readmem((ulonglong)(PGD), TYPE, machdep->pgd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pgd_read_lpae \ = (ulonglong)(PGD); \ } #define FILL_PMD_LPAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_LPAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_lpae \ = (ulonglong)(PMD); \ } #define FILL_PTBL_LPAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_LPAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_lpae \ = (ulonglong)(PTBL); \ } #endif /* ARM */ #ifndef EM_AARCH64 #define EM_AARCH64 183 #endif #ifdef ARM64 #define _64BIT_ #define MACHINE_TYPE "ARM64" #define PTOV(X) \ ((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset)) #define VTOP(X) \ ((unsigned 
long)(X)-(machdep->machspec->page_offset)+(machdep->machspec->phys_offset)) #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define IS_VMALLOC_ADDR(X) arm64_IS_VMALLOC_ADDR((ulong)(X)) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) /* * 48-bit physical address supported. */ #define PHYS_MASK_SHIFT (48) #define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) typedef signed int s32; /* * 3-levels / 4K pages */ #define PTRS_PER_PGD_L3_4K (512) #define PTRS_PER_PMD_L3_4K (512) #define PTRS_PER_PTE_L3_4K (512) #define PGDIR_SHIFT_L3_4K (30) #define PGDIR_SIZE_L3_4K ((1UL) << PGDIR_SHIFT_L3_4K) #define PGDIR_MASK_L3 4K (~(PGDIR_SIZE_L3_4K-1)) #define PMD_SHIFT_L3_4K (21) #define PMD_SIZE_L3_4K (1UL << PMD_SHIFT_4K) #define PMD_MASK_L3 4K (~(PMD_SIZE_4K-1)) /* * 2-levels / 64K pages */ #define PTRS_PER_PGD_L2_64K (8192) #define PTRS_PER_PTE_L2_64K (8192) #define PGDIR_SHIFT_L2_64K (29) #define PGDIR_SIZE_L2_64K ((1UL) << PGDIR_SHIFT_L2_64K) #define PGDIR_MASK_L2_64K (~(PGDIR_SIZE_L2_64K-1)) /* * Software defined PTE bits definition. * (arch/arm64/include/asm/pgtable.h) */ #define PTE_VALID (1UL << 0) #define PTE_DIRTY (1UL << 55) #define PTE_SPECIAL (1UL << 56) /* * Level 3 descriptor (PTE). 
* (arch/arm64/include/asm/pgtable-hwdef.h) */ #define PTE_TYPE_MASK (3UL << 0) #define PTE_TYPE_FAULT (0UL << 0) #define PTE_TYPE_PAGE (3UL << 0) #define PTE_USER (1UL << 6) /* AP[1] */ #define PTE_RDONLY (1UL << 7) /* AP[2] */ #define PTE_SHARED (3UL << 8) /* SH[1:0], inner shareable */ #define PTE_AF (1UL << 10) /* Access Flag */ #define PTE_NG (1UL << 11) /* nG */ #define PTE_PXN (1UL << 53) /* Privileged XN */ #define PTE_UXN (1UL << 54) /* User XN */ #define __swp_type(x) arm64_swp_type(x) #define __swp_offset(x) arm64_swp_offset(x) #define SWP_TYPE(x) __swp_type(x) #define SWP_OFFSET(x) __swp_offset(x) #define KSYMS_START (0x1) #define PHYS_OFFSET (0x2) #define VM_L2_64K (0x4) #define VM_L3_4K (0x8) #define KDUMP_ENABLED (0x10) /* * sources: Documentation/arm64/memory.txt * arch/arm64/include/asm/memory.h * arch/arm64/include/asm/pgtable.h */ #define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) << (machdep->machspec->VA_BITS - 1)) #define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS) #define ARM64_MODULES_VADDR (ARM64_PAGE_OFFSET - MEGABYTES(64)) #define ARM64_MODULES_END (ARM64_PAGE_OFFSET - 1) #define ARM64_VMALLOC_START ((0xffffffffffffffffUL) << machdep->machspec->VA_BITS) /* * The following 3 definitions are the original values, but are obsolete * for 3.17 and later kernels because they are now build-time calculations. * They all depend on the kernel's new VMEMMAP_SIZE value, which is dependent * upon the size of struct page. Accordingly, arm64_calc_virtual_memory_ranges() * determines their values at POST_GDB time. 
*/
#define ARM64_VMALLOC_END (ARM64_PAGE_OFFSET - 0x400000000UL - KILOBYTES(64) - 1)
#define ARM64_VMEMMAP_VADDR ((ARM64_VMALLOC_END+1) + KILOBYTES(64))
#define ARM64_VMEMMAP_END (ARM64_VMEMMAP_VADDR + GIGABYTES(8UL) - 1)
#define ARM64_STACK_SIZE (16384)
#define _SECTION_SIZE_BITS 30
#define _MAX_PHYSMEM_BITS 40
typedef unsigned long long __u64;
typedef unsigned long long u64;
/* NOTE(review): field-for-field copies of the kernel's AArch64 pt_regs
 * layouts — must be kept in sync with the kernel headers. */
struct arm64_user_pt_regs {
	__u64 regs[31];
	__u64 sp;
	__u64 pc;
	__u64 pstate;
};
struct arm64_pt_regs {
	union {
		struct arm64_user_pt_regs user_regs;
		struct {
			u64 regs[31];
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	u64 orig_x0;
	u64 syscallno;
};
/* AArch32 CPSR bits */
#define PSR_MODE32_BIT 0x00000010
#define TIF_SIGPENDING (0)
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to ARM64 architecture\n")
/* ARM64-specific state hung off machdep->machspec */
struct machine_specific {
	ulong flags;
	ulong userspace_top;
	ulong page_offset;
	ulong vmalloc_start_addr;
	ulong vmalloc_end;
	ulong vmemmap_vaddr;
	ulong vmemmap_end;
	ulong modules_vaddr;
	ulong modules_end;
	ulong phys_offset;
	ulong __exception_text_start;
	ulong __exception_text_end;
	struct arm64_pt_regs *panic_task_regs;
	ulong PTE_PROT_NONE;		/* software PTE bits resolved at runtime */
	ulong PTE_FILE;
	ulong VA_BITS;
	ulong __SWP_TYPE_BITS;		/* per-kernel swap-entry encoding */
	ulong __SWP_TYPE_SHIFT;
	ulong __SWP_TYPE_MASK;
	ulong __SWP_OFFSET_BITS;
	ulong __SWP_OFFSET_SHIFT;
	ulong __SWP_OFFSET_MASK;
	ulong crash_kexec_start;
	ulong crash_kexec_end;
	ulong crash_save_cpu_start;
	ulong crash_save_cpu_end;
	ulong kernel_flags;
};
/* one frame of an ARM64 kernel-stack unwind */
struct arm64_stackframe {
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};
#endif /* ARM64 */
#ifdef MIPS
#define _32BIT_
#define MACHINE_TYPE "MIPS"
#define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask)
/* KSEG0 linear mapping: virtual = physical + 0x80000000 */
#define PTOV(X) ((unsigned long)(X) + 0x80000000lu)
#define VTOP(X) ((unsigned long)(X) & 0x1ffffffflu)
#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)
#define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024)
#define MODULES_VADDR (machdep->machspec->modules_vaddr)
#define MODULES_END
(machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #endif /* MIPS */ #ifdef X86 #define _32BIT_ #define MACHINE_TYPE "X86" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KVBASE_MASK (0x1ffffff) #define PGDIR_SHIFT_2LEVEL (22) #define PTRS_PER_PTE_2LEVEL (1024) #define PTRS_PER_PGD_2LEVEL (1024) #define PGDIR_SHIFT_3LEVEL (30) #define PTRS_PER_PTE_3LEVEL (512) #define PTRS_PER_PGD_3LEVEL (4) #define PMD_SHIFT (21) /* only used by PAE translators */ #define PTRS_PER_PMD (512) /* only used by PAE translators */ #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_4M 0x080 /* 4 MB page, Pentium+, if present.. */ #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (0x8000000000000000ULL) #define NONPAE_PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define NX_BIT_MASK (0x7fffffffffffffffULL) #define PAE_PAGEBASE(X) (((unsigned long long)(X)) & ((unsigned long long)machdep->pagemask) & NX_BIT_MASK) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) #define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) #define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) #define __swp_offset_nonPAE(entry) ((entry) >> 8) #define __swp_type(entry) (machdep->flags & PAE ? \ __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) #define __swp_offset(entry) (machdep->flags & PAE ? \ __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) #define TIF_SIGPENDING (2) // CONFIG_X86_PAE #define _SECTION_SIZE_BITS_PAE_ORIG 30 #define _SECTION_SIZE_BITS_PAE_2_6_26 29 #define _MAX_PHYSMEM_BITS_PAE 36 // !CONFIG_X86_PAE #define _SECTION_SIZE_BITS 26 #define _MAX_PHYSMEM_BITS 32 #define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) #define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) #define FILL_PMD_PAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_PAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ } #define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ } #endif /* X86 */ #ifdef X86_64 #define _64BIT_ #define MACHINE_TYPE "X86_64" #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START 
(machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define __START_KERNEL_map 0xffffffff80000000UL #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define USERSPACE_TOP_ORIG 0x0000008000000000 #define PAGE_OFFSET_ORIG 0x0000010000000000 #define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 #define VMALLOC_END_ORIG 0xffffff7fffffffff #define MODULES_VADDR_ORIG 0xffffffffa0000000 #define MODULES_END_ORIG 0xffffffffafffffff #define USERSPACE_TOP_2_6_11 0x0000800000000000 #define PAGE_OFFSET_2_6_11 0xffff810000000000 #define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 #define VMALLOC_END_2_6_11 0xffffe1ffffffffff #define MODULES_VADDR_2_6_11 0xffffffff88000000 #define MODULES_END_2_6_11 0xfffffffffff00000 #define VMEMMAP_VADDR_2_6_24 0xffffe20000000000 #define VMEMMAP_END_2_6_24 0xffffe2ffffffffff #define MODULES_VADDR_2_6_26 0xffffffffa0000000 #define PAGE_OFFSET_2_6_27 0xffff880000000000 #define MODULES_END_2_6_27 0xffffffffff000000 #define USERSPACE_TOP_XEN 0x0000800000000000 #define PAGE_OFFSET_XEN 0xffff880000000000 #define VMALLOC_START_ADDR_XEN 0xffffc20000000000 #define VMALLOC_END_XEN 0xffffe1ffffffffff #define MODULES_VADDR_XEN 0xffffffff88000000 #define MODULES_END_XEN 0xfffffffffff00000 #define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 #define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 #define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 #define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff #define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 #define MODULES_END_XEN_RHEL4 0xffffffffafffffff #define VMALLOC_START_ADDR_2_6_31 0xffffc90000000000 #define VMALLOC_END_2_6_31 0xffffe8ffffffffff #define VMEMMAP_VADDR_2_6_31 0xffffea0000000000 #define VMEMMAP_END_2_6_31 0xffffeaffffffffff #define 
MODULES_VADDR_2_6_31 0xffffffffa0000000 #define MODULES_END_2_6_31 0xffffffffff000000 #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) x86_64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) #define PML4_SHIFT 39 #define PTRS_PER_PML4 512 #define PGDIR_SHIFT 30 #define PTRS_PER_PGD 512 #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 #define PTRS_PER_PTE 512 #define pml4_index(address) (((address) >> PML4_SHIFT) & (PTRS_PER_PML4-1)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define IS_LAST_PML4_READ(pml4) ((ulong)(pml4) == machdep->machspec->last_pml4_read) #define FILL_PML4() { \ if (!(pc->flags & RUNTIME) || ACTIVE()) \ if (!IS_LAST_PML4_READ(vt->kernel_pgd[0])) \ readmem(vt->kernel_pgd[0], KVADDR, machdep->machspec->pml4, \ PAGESIZE(), "init_level4_pgt", FAULT_ON_ERROR); \ machdep->machspec->last_pml4_read = (ulong)(vt->kernel_pgd[0]); \ } #define FILL_PML4_HYPER() { \ if (!machdep->machspec->last_pml4_read) { \ unsigned long idle_pg_table = \ symbol_exists("idle_pg_table_4") ? symbol_value("idle_pg_table_4") : \ symbol_value("idle_pg_table"); \ readmem(idle_pg_table, KVADDR, \ machdep->machspec->pml4, PAGESIZE(), "idle_pg_table", \ FAULT_ON_ERROR); \ machdep->machspec->last_pml4_read = idle_pg_table; \ }\ } #define IS_LAST_UPML_READ(pml) ((ulong)(pml) == machdep->machspec->last_upml_read) #define FILL_UPML(PML, TYPE, SIZE) \ if (!IS_LAST_UPML_READ(PML)) { \ readmem((ulonglong)((ulong)(PML)), TYPE, machdep->machspec->upml, \ SIZE, "pml page", FAULT_ON_ERROR); \ machdep->machspec->last_upml_read = (ulong)(PML); \ } /* * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so * for safety, use the 2.6 values to generate it. 
*/ #define __PHYSICAL_MASK_SHIFT 40 #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT)) #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 2MB page */ #define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (1UL<<_PAGE_BIT_NX) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define _CPU_PDA_READ2(CPU, BUFFER) \ ((readmem(symbol_value("_cpu_pda"), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (cpu_pda_addr) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) #define _CPU_PDA_READ(CPU, BUFFER) \ ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ ((CPU) * sizeof(unsigned long)))))) && \ (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) #define CPU_PDA_READ(CPU, BUFFER) \ (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ ((CPU) * SIZE(x8664_pda))))) && \ 
readmem(symbol_value("cpu_pda") + ((CPU) * SIZE(x8664_pda)), \ KVADDR, (BUFFER), SIZE(x8664_pda), "cpu_pda entry", \ RETURN_ON_ERROR)) #define VALID_LEVEL4_PGT_ADDR(X) \ (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) #define _SECTION_SIZE_BITS 27 #define _MAX_PHYSMEM_BITS 40 #define _MAX_PHYSMEM_BITS_2_6_26 44 #define _MAX_PHYSMEM_BITS_2_6_31 46 #endif /* X86_64 */ #ifdef ALPHA #define _64BIT_ #define MACHINE_TYPE "ALPHA" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KSEG_BASE_48_BIT (0xffff800000000000) #define KSEG_BASE (0xfffffc0000000000) #define _PFN_MASK (0xFFFFFFFF00000000) #define VMALLOC_START (0xFFFFFE0000000000) #define MIN_SYMBOL_VALUE (KSEG_BASE_48_BIT) #define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) #define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) #define PTRS_PER_PAGE (1024) #define PTRS_PER_PGD (1UL << (PAGESHIFT()-3)) /* * OSF/1 PAL-code-imposed page table bits */ #define _PAGE_VALID 0x0001 #define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ #define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ #define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ #define _PAGE_ASM 0x0010 #define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ #define _PAGE_URE 0x0200 /* xxx */ #define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ #define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ /* .. and these are ours ... 
*/ #define _PAGE_DIRTY 0x20000 #define _PAGE_ACCESSED 0x40000 #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 40) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #endif /* ALPHA */ #ifdef PPC #define _32BIT_ #define MACHINE_TYPE "PPC" #define PAGEBASE(X) ((X) & machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) /* Holds the platform specific info for page translation */ struct machine_specific { char *platform; /* page address translation bits */ int pte_size; int pte_rpn_shift; /* page flags */ ulong _page_present; ulong _page_user; ulong _page_rw; ulong _page_guarded; ulong _page_coherent; ulong _page_no_cache; ulong _page_writethru; ulong _page_dirty; ulong _page_accessed; ulong _page_hwwrite; ulong _page_shared; ulong _page_k_rw; /* platform special vtop */ int (*vtop_special)(ulong vaddr, physaddr_t *paddr, int verbose); void *mmu_special; }; /* machdep flags for ppc32 specific */ #define IS_PAE() (machdep->flags & PAE) #define IS_BOOKE() (machdep->flags & CPU_BOOKE) /* Page translation bits */ #define PPC_PLATFORM (machdep->machspec->platform) #define PTE_SIZE (machdep->machspec->pte_size) #define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) #define PAGE_SHIFT (12) #define PTE_T_LOG2 (ffs(PTE_SIZE) - 1) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) #define PTRS_PER_PTE (1 << PTE_SHIFT) /* special vtop */ #define VTOP_SPECIAL (machdep->machspec->vtop_special) #define MMU_SPECIAL (machdep->machspec->mmu_special) /* PFN shifts */ #define BOOKE3E_PTE_RPN_SHIFT (24) /* PAGE flags */ #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER 
(machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define _PAGE_HWWRITE (machdep->machspec->_page_hwwrite) /* software: _PAGE_RW & _PAGE_DIRTY */ #define _PAGE_SHARED (machdep->machspec->_page_shared) #define _PAGE_K_RW (machdep->machspec->_page_k_rw) /* privilege only write access allowed */ /* Default values for PAGE flags */ #define DEFAULT_PAGE_PRESENT 0x001 #define DEFAULT_PAGE_USER 0x002 #define DEFAULT_PAGE_RW 0x004 #define DEFAULT_PAGE_GUARDED 0x008 #define DEFAULT_PAGE_COHERENT 0x010 #define DEFAULT_PAGE_NO_CACHE 0x020 #define DEFAULT_PAGE_WRITETHRU 0x040 #define DEFAULT_PAGE_DIRTY 0x080 #define DEFAULT_PAGE_ACCESSED 0x100 #define DEFAULT_PAGE_HWWRITE 0x200 #define DEFAULT_PAGE_SHARED 0 /* PPC44x PAGE flags: Values from kernel asm/pte-44x.h */ #define PPC44x_PAGE_PRESENT 0x001 #define PPC44x_PAGE_RW 0x002 #define PPC44x_PAGE_ACCESSED 0x008 #define PPC44x_PAGE_DIRTY 0x010 #define PPC44x_PAGE_USER 0x040 #define PPC44x_PAGE_GUARDED 0x100 #define PPC44x_PAGE_COHERENT 0x200 #define PPC44x_PAGE_NO_CACHE 0x400 #define PPC44x_PAGE_WRITETHRU 0x800 #define PPC44x_PAGE_HWWRITE 0 #define PPC44x_PAGE_SHARED 0 /* BOOK3E */ #define BOOK3E_PAGE_PRESENT 0x000001 #define BOOK3E_PAGE_BAP_SR 0x000004 #define BOOK3E_PAGE_BAP_UR 0x000008 /* User Readable */ #define BOOK3E_PAGE_BAP_SW 0x000010 #define BOOK3E_PAGE_BAP_UW 0x000020 /* User Writable */ #define BOOK3E_PAGE_DIRTY 0x001000 #define 
BOOK3E_PAGE_ACCESSED 0x040000 #define BOOK3E_PAGE_GUARDED 0x100000 #define BOOK3E_PAGE_COHERENT 0x200000 #define BOOK3E_PAGE_NO_CACHE 0x400000 #define BOOK3E_PAGE_WRITETHRU 0x800000 #define BOOK3E_PAGE_HWWRITE 0 #define BOOK3E_PAGE_SHARED 0 #define BOOK3E_PAGE_USER (BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_BAP_UR) #define BOOK3E_PAGE_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_UW) #define BOOK3E_PAGE_KERNEL_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_DIRTY) /* FSL BOOKE */ #define FSL_BOOKE_PAGE_PRESENT 0x00001 #define FSL_BOOKE_PAGE_USER 0x00002 #define FSL_BOOKE_PAGE_RW 0x00004 #define FSL_BOOKE_PAGE_DIRTY 0x00008 #define FSL_BOOKE_PAGE_ACCESSED 0x00020 #define FSL_BOOKE_PAGE_GUARDED 0x00080 #define FSL_BOOKE_PAGE_COHERENT 0x00100 #define FSL_BOOKE_PAGE_NO_CACHE 0x00200 #define FSL_BOOKE_PAGE_WRITETHRU 0x00400 #define FSL_BOOKE_PAGE_HWWRITE 0 #define FSL_BOOKE_PAGE_SHARED 0 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define STACK_FRAME_OVERHEAD 16 #define STACK_FRAME_LR_SAVE (sizeof(ulong)) #define STACK_FRAME_MARKER (2 * sizeof(ulong)) #define STACK_FRAME_REGS_MARKER 0x72656773 #define PPC_STACK_SIZE 8192 #endif /* PPC */ #ifdef IA64 #define _64BIT_ #define MACHINE_TYPE "IA64" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define REGION_SHIFT (61) #define VADDR_REGION(X) ((ulong)(X) >> REGION_SHIFT) #define KERNEL_CACHED_REGION (7) #define KERNEL_UNCACHED_REGION (6) #define KERNEL_VMALLOC_REGION (5) #define USER_STACK_REGION (4) #define USER_DATA_REGION (3) #define USER_TEXT_REGION (2) #define USER_SHMEM_REGION (1) #define USER_IA32_EMUL_REGION (0) #define KERNEL_VMALLOC_BASE ((ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT) #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) #define 
KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 50 /* * As of 2.6, these are no longer straight forward. */ #define PTOV(X) ia64_PTOV((ulong)(X)) #define VTOP(X) ia64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) ia64_IS_VMALLOC_ADDR((ulong)(X)) #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) #define __IA64_UL(x) ((unsigned long)(x)) #define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ /* * How many pointers will a page table level hold expressed in shift */ #define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) /* * Definitions for fourth level: */ #define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) /* * Definitions for third level: * * PMD_SHIFT determines the size of the area a third-level page table * can map. */ #define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) /* * PUD_SHIFT determines the size of the area a second-level page table * can map */ #define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) /* * Definitions for first level: * * PGDIR_SHIFT determines what a first-level page table entry can map. */ #define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) /* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ #define PGDIR_SHIFT PGDIR_SHIFT_4L #define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT #define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) #define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ #define FIRST_USER_ADDRESS 0 /* * First, define the various bits in a PTE. 
Note that the PTE format * matches the VHPT short format, the firt doubleword of the VHPD long * format, and the first doubleword of the TLB insertion format. */ #define _PAGE_P (1 << 0) /* page present bit */ #define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */ #define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */ #define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */ #define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */ #define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */ #define _PAGE_MA_MASK (0x7 << 2) #define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */ #define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */ #define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */ #define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */ #define _PAGE_PL_MASK (3 << 7) #define _PAGE_AR_R (0 << 9) /* read only */ #define _PAGE_AR_RX (1 << 9) /* read & execute */ #define _PAGE_AR_RW (2 << 9) /* read & write */ #define _PAGE_AR_RWX (3 << 9) /* read, write & execute */ #define _PAGE_AR_R_RW (4 << 9) /* read / read & write */ #define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */ #define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */ #define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */ #define _PAGE_AR_MASK (7 << 9) #define _PAGE_AR_SHIFT 9 #define _PAGE_A (1 << 5) /* page accessed bit */ #define _PAGE_D (1 << 6) /* page dirty bit */ #define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_PROTNONE (__IA64_UL(1) << 63) #define _PFN_MASK _PAGE_PPN_MASK #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D) #define _PAGE_SIZE_4K 12 #define _PAGE_SIZE_8K 13 #define _PAGE_SIZE_16K 14 #define _PAGE_SIZE_64K 16 #define _PAGE_SIZE_256K 18 #define _PAGE_SIZE_1M 20 #define _PAGE_SIZE_4M 22 #define _PAGE_SIZE_16M 24 #define _PAGE_SIZE_64M 26 #define _PAGE_SIZE_256M 28 #define 
__ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB #define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED #define EFI_PAGE_SHIFT (12) /* * NOTE: #include'ing creates too many compiler problems, so * this stuff is hardwired here; it's probably etched in stone somewhere. */ struct efi_memory_desc_t { uint32_t type; uint32_t pad; uint64_t phys_addr; uint64_t virt_addr; uint64_t num_pages; uint64_t attribute; } desc; /* Memory types: */ #define EFI_RESERVED_TYPE 0 #define EFI_LOADER_CODE 1 #define EFI_LOADER_DATA 2 #define EFI_BOOT_SERVICES_CODE 3 #define EFI_BOOT_SERVICES_DATA 4 #define EFI_RUNTIME_SERVICES_CODE 5 #define EFI_RUNTIME_SERVICES_DATA 6 #define EFI_CONVENTIONAL_MEMORY 7 #define EFI_UNUSABLE_MEMORY 8 #define EFI_ACPI_RECLAIM_MEMORY 9 #define EFI_ACPI_MEMORY_NVS 10 #define EFI_MEMORY_MAPPED_IO 11 #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 #define EFI_PAL_CODE 13 #define EFI_MAX_MEMORY_TYPE 14 /* Attribute values: */ #define EFI_MEMORY_UC 0x0000000000000001 /* uncached */ #define EFI_MEMORY_WC 0x0000000000000002 /* write-coalescing */ #define EFI_MEMORY_WT 0x0000000000000004 /* write-through */ #define EFI_MEMORY_WB 0x0000000000000008 /* write-back */ #define EFI_MEMORY_WP 0x0000000000001000 /* write-protect */ #define EFI_MEMORY_RP 0x0000000000002000 /* read-protect */ #define EFI_MEMORY_XP 0x0000000000004000 /* execute-protect */ #define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */ #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 9) #define __swp_type(entry) ((entry >> 2) & 0x7f) #define __swp_offset(entry) ((entry << 1) >> 10) #define TIF_SIGPENDING (1) #define KERNEL_TR_PAGE_SIZE (1 << _PAGE_SIZE_64M) #define KERNEL_TR_PAGE_MASK (~(KERNEL_TR_PAGE_SIZE - 1)) #define UNKNOWN_PHYS_START ((ulong)(-1)) #define DEFAULT_PHYS_START (KERNEL_TR_PAGE_SIZE * 1) #define IA64_GET_STACK_ULONG(OFF) \ ((INSTACK(OFF,bt)) ? 
(GET_STACK_ULONG(OFF)) : get_init_stack_ulong((unsigned long)OFF)) #endif /* IA64 */ #ifdef PPC64 #define _64BIT_ #define MACHINE_TYPE "PPC64" #define PPC64_64K_PAGE_SIZE 65536 #define PPC64_STACK_SIZE 16384 #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->identity_map_base)) #define VTOP(X) ((unsigned long)(X)-(machdep->identity_map_base)) #define BOOK3E_VMBASE 0x8000000000000000 #define IS_VMALLOC_ADDR(X) machdep->machspec->is_vmaddr(X) #define KERNELBASE machdep->pageoffset #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) #define PMD_SHIFT (machdep->pageshift + (machdep->pageshift - 3)) #define PGD_MASK (~((1UL << PGDIR_SHIFT) - 1)) #define PMD_MASK (~((1UL << PMD_SHIFT) - 1)) /* shift to put page number into pte */ #define PTE_SHIFT 16 #define PMD_TO_PTEPAGE_SHIFT 2 /* Used for 2.6 or later */ #define PTE_INDEX_SIZE 9 #define PMD_INDEX_SIZE 10 #define PGD_INDEX_SIZE 10 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) #define PGD_OFFSET_24(vaddr) ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) /* 4-level page table support */ /* 4K pagesize */ #define PTE_INDEX_SIZE_L4_4K 9 #define PMD_INDEX_SIZE_L4_4K 7 #define PUD_INDEX_SIZE_L4_4K 7 #define PGD_INDEX_SIZE_L4_4K 9 #define PTE_SHIFT_L4_4K 17 #define PMD_MASKED_BITS_4K 0 /* 64K pagesize */ #define PTE_INDEX_SIZE_L4_64K 12 #define PMD_INDEX_SIZE_L4_64K 12 #define PUD_INDEX_SIZE_L4_64K 0 #define PGD_INDEX_SIZE_L4_64K 4 #define PTE_INDEX_SIZE_L4_64K_3_10 8 #define PMD_INDEX_SIZE_L4_64K_3_10 10 #define PGD_INDEX_SIZE_L4_64K_3_10 12 #define PTE_SHIFT_L4_64K_V1 32 #define PTE_SHIFT_L4_64K_V2 30 #define PTE_SHIFT_L4_BOOK3E_64K 28 #define PTE_SHIFT_L4_BOOK3E_4K 24 #define PMD_MASKED_BITS_64K 0x1ff #define PD_HUGE 
0x8000000000000000 #define HUGE_PTE_MASK 0x03 #define HUGEPD_SHIFT_MASK 0x3f #define L4_MASK (THIS_KERNEL_VERSION >= LINUX(3,10,0) ? 0xfff : 0x1ff) #define L4_OFFSET(vaddr) ((vaddr >> (machdep->machspec->l4_shift)) & L4_MASK) #define PGD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) #define PMD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define TIF_SIGPENDING (2) #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define MSR_PR_LG 14 /* Problem State / Privilege Level */ /* Used to find the user or kernel-mode frame*/ #define STACK_FRAME_OVERHEAD 112 #define EXCP_FRAME_MARKER 0x7265677368657265 #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define _MAX_PHYSMEM_BITS_3_7 46 #endif /* PPC64 */ #ifdef S390 #define _32BIT_ #define MACHINE_TYPE "S390" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define PTRS_PER_PTE 1024 
#define PTRS_PER_PMD 1 #define PTRS_PER_PGD 512 #define SEGMENT_TABLE_SIZE ((sizeof(ulong)*4) * PTRS_PER_PGD) #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ (((entry) >> 7) & 0x1)) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 25 #define _MAX_PHYSMEM_BITS 31 #endif /* S390 */ #ifdef S390X #define _64BIT_ #define MACHINE_TYPE "S390X" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define PTRS_PER_PTE 512 #define PTRS_PER_PMD 1024 #define PTRS_PER_PGD 2048 #define SEGMENT_TABLE_SIZE ((sizeof(ulong)*2) * PTRS_PER_PMD) #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ (((entry) >> 7) & 0x1)) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS_OLD 42 #define _MAX_PHYSMEM_BITS_NEW 46 #endif /* S390X */ #ifdef PLATFORM #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #endif /* PLATFORM */ #define KILOBYTES(x) ((x) * (1024)) #define MEGABYTES(x) ((x) * (1048576)) #define GIGABYTES(x) ((x) * (1073741824)) #define MEGABYTE_MASK (MEGABYTES(1)-1) #define SIZEOF_64BIT (8) #define SIZEOF_32BIT (4) #define SIZEOF_16BIT (2) #define SIZEOF_8BIT (1) #ifdef ARM #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef X86 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef ALPHA #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (11) #endif #ifdef PPC #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) 
#endif #ifdef IA64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef S390 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef S390X #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef X86_64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef PPC64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef ARM64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef MIPS #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #define BADADDR ((ulong)(-1)) #define BADVAL ((ulong)(-1)) #define UNUSED (-1) #define UNINITIALIZED (BADVAL) #define BITS_PER_BYTE (8) #define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) #define NUM_TO_BIT(x) (1UL<<((x)%BITS_PER_LONG)) #define NUM_IN_BITMAP(bitmap, x) (bitmap[(x)/BITS_PER_LONG] & NUM_TO_BIT(x)) #define SET_BIT(bitmap, x) (bitmap[(x)/BITS_PER_LONG] |= NUM_TO_BIT(x)) /* * precision lengths for fprintf */ #define VADDR_PRLEN (sizeof(char *) == 8 ? 16 : 8) #define LONG_LONG_PRLEN (16) #define LONG_PRLEN (sizeof(long) == 8 ? 16 : 8) #define INT_PRLEN (sizeof(int) == 8 ? 16 : 8) #define CHAR_PRLEN (2) #define SHORT_PRLEN (4) #define MINSPACE (-100) #define SYNOPSIS (0x1) #define COMPLETE_HELP (0x2) #define PIPE_TO_SCROLL (0x4) #define MUST_HELP (0x8) #define LEFT_JUSTIFY (1) #define RIGHT_JUSTIFY (2) #define CENTER (0x1) #define LJUST (0x2) #define RJUST (0x4) #define LONG_DEC (0x8) #define LONG_HEX (0x10) #define INT_DEC (0x20) #define INT_HEX (0x40) #define LONGLONG_HEX (0x80) #define ZERO_FILL (0x100) #define INIT_TIME (1) #define RUN_TIME (2) /* * IRQ line status. * For kernels up to and including 2.6.17 */ #define IRQ_INPROGRESS_2_6_17 1 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_17 2 /* IRQ disabled - do not enter! 
*/ #define IRQ_PENDING_2_6_17 4 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_17 8 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_17 16 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_17 32 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_17 64 /* IRQ level triggered */ #define IRQ_MASKED_2_6_17 128 /* IRQ masked - shouldn't be seen again */ /* * For kernel 2.6.21 and later */ #define IRQ_TYPE_NONE_2_6_21 0x00000000 /* Default, unspecified type */ #define IRQ_TYPE_EDGE_RISING_2_6_21 0x00000001 /* Edge rising type */ #define IRQ_TYPE_EDGE_FALLING_2_6_21 0x00000002 /* Edge falling type */ #define IRQ_TYPE_EDGE_BOTH_2_6_21 (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) #define IRQ_TYPE_LEVEL_HIGH_2_6_21 0x00000004 /* Level high type */ #define IRQ_TYPE_LEVEL_LOW_2_6_21 0x00000008 /* Level low type */ #define IRQ_TYPE_SENSE_MASK_2_6_21 0x0000000f /* Mask of the above */ #define IRQ_TYPE_PROBE_2_6_21 0x00000010 /* Probing in progress */ #define IRQ_INPROGRESS_2_6_21 0x00000100 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_21 0x00000200 /* IRQ disabled - do not enter! 
*/ #define IRQ_PENDING_2_6_21 0x00000400 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_21 0x00000800 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_21 0x00001000 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_21 0x00002000 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_21 0x00004000 /* IRQ level triggered */ #define IRQ_MASKED_2_6_21 0x00008000 /* IRQ masked - shouldn't be seen again */ #define IRQ_PER_CPU_2_6_21 0x00010000 /* IRQ is per CPU */ #define IRQ_NOPROBE_2_6_21 0x00020000 /* IRQ is not valid for probing */ #define IRQ_NOREQUEST_2_6_21 0x00040000 /* IRQ cannot be requested */ #define IRQ_NOAUTOEN_2_6_21 0x00080000 /* IRQ will not be enabled on request irq */ #define IRQ_WAKEUP_2_6_21 0x00100000 /* IRQ triggers system wakeup */ #define IRQ_MOVE_PENDING_2_6_21 0x00200000 /* need to re-target IRQ destination */ #define IRQ_NO_BALANCING_2_6_21 0x00400000 /* IRQ is excluded from balancing */ #define IRQ_SPURIOUS_DISABLED_2_6_21 0x00800000 /* IRQ was disabled by the spurious trap */ #define IRQ_MOVE_PCNTXT_2_6_21 0x01000000 /* IRQ migration from process context */ #define IRQ_AFFINITY_SET_2_6_21 0x02000000 /* IRQ affinity was set from userspace*/ /* * Select proper IRQ value depending on kernel version */ #define IRQ_TYPE_NONE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_NONE_2_6_21 : 0) #define IRQ_TYPE_EDGE_RISING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_RISING_2_6_21 : 0) #define IRQ_TYPE_EDGE_FALLING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_FALLING_2_6_21 : 0) #define IRQ_TYPE_EDGE_BOTH \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_BOTH_2_6_21 : 0) #define IRQ_TYPE_LEVEL_HIGH \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_HIGH_2_6_21 : 0) #define IRQ_TYPE_LEVEL_LOW \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_LOW_2_6_21 : 0) #define IRQ_TYPE_SENSE_MASK \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_TYPE_SENSE_MASK_2_6_21 : 0) #define IRQ_TYPE_PROBE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_PROBE_2_6_21 : 0) #define IRQ_INPROGRESS \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_INPROGRESS_2_6_21 : IRQ_INPROGRESS_2_6_17) #define IRQ_DISABLED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_DISABLED_2_6_21 : IRQ_DISABLED_2_6_17) #define IRQ_PENDING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PENDING_2_6_21 : IRQ_PENDING_2_6_17) #define IRQ_REPLAY \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_REPLAY_2_6_21 : IRQ_REPLAY_2_6_17) #define IRQ_AUTODETECT \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_AUTODETECT_2_6_21 : IRQ_AUTODETECT_2_6_17) #define IRQ_WAITING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAITING_2_6_21 : IRQ_WAITING_2_6_17) #define IRQ_LEVEL \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_LEVEL_2_6_21 : IRQ_LEVEL_2_6_17) #define IRQ_MASKED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MASKED_2_6_21 : IRQ_MASKED_2_6_17) #define IRQ_PER_CPU \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PER_CPU_2_6_21 : 0) #define IRQ_NOPROBE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOPROBE_2_6_21 : 0) #define IRQ_NOREQUEST \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOREQUEST_2_6_21 : 0) #define IRQ_NOAUTOEN \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOAUTOEN_2_6_21 : 0) #define IRQ_WAKEUP \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAKEUP_2_6_21 : 0) #define IRQ_MOVE_PENDING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PENDING_2_6_21 : 0) #define IRQ_NO_BALANCING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NO_BALANCING_2_6_21 : 0) #define IRQ_SPURIOUS_DISABLED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_SPURIOUS_DISABLED_2_6_21 : 0) #define IRQ_MOVE_PCNTXT \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PCNTXT_2_6_21 : 0) #define IRQ_AFFINITY_SET \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_AFFINITY_SET_2_6_21 : 0) #ifdef ARM #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86_64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef ALPHA #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x40000000 #endif #ifdef PPC #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef PPC64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000u #endif #ifdef IA64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390X #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #define ACTION_FLAGS (SA_INTERRUPT|SA_PROBE|SA_SAMPLE_RANDOM|SA_SHIRQ) #endif /* !GDB_COMMON */ /* * Common request structure for BFD or GDB data or commands. 
 */
struct gnu_request {
	int command;		/* presumably one of the GNU_* command codes below — TODO confirm at call sites */
	char *buf;
	FILE *fp;		/* stream the request's output is written to */
	ulong addr;
	ulong addr2;
	ulong count;
	ulong flags;		/* GNU_* flag bits (defined below) */
	char *name;
	ulong length;
	int typecode;
	/* member name differs with the bundled gdb version (gdb <= 7.0 uses "typename") */
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0)
	char *typename;
#else
	char *type_name;
#endif
	char *target_typename;
	ulong target_length;
	int target_typecode;
	int is_typedef;
	char *member;
	long member_offset;
	long member_length;
	int member_typecode;
	long value;
	char *tagname;
	ulong pc;		/* program counter / stack context for trace-related requests */
	ulong sp;
	ulong ra;
	int curframe;
	ulong frame;
	ulong prevsp;
	ulong prevpc;
	ulong lastsp;
	ulong task;
	ulong debug;
	struct stack_hook *hookp;
};

/*
 * GNU commands
 * (request codes placed in gnu_request.command)
 */
#define GNU_DATATYPE_INIT           (1)
#define GNU_DISASSEMBLE             (2)
#define GNU_GET_LINE_NUMBER         (3)
#define GNU_PASS_THROUGH            (4)
#define GNU_GET_DATATYPE            (5)
#define GNU_COMMAND_EXISTS          (6)
#define GNU_STACK_TRACE             (7)
#define GNU_ALPHA_FRAME_OFFSET      (8)
#define GNU_FUNCTION_NUMARGS        (9)
#define GNU_RESOLVE_TEXT_ADDR       (10)
#define GNU_ADD_SYMBOL_FILE         (11)
#define GNU_DELETE_SYMBOL_FILE      (12)
#define GNU_VERSION                 (13)
#define GNU_PATCH_SYMBOL_VALUES     (14)
#define GNU_GET_SYMBOL_TYPE         (15)
#define GNU_USER_PRINT_OPTION       (16)
#define GNU_SET_CRASH_BLOCK         (17)
#define GNU_GET_FUNCTION_RANGE      (18)
#define GNU_DEBUG_COMMAND           (100)

/*
 * GNU flags
 * (option bits placed in gnu_request.flags)
 */
#define GNU_PRINT_LINE_NUMBERS      (0x1)
#define GNU_FUNCTION_ONLY           (0x2)
#define GNU_PRINT_ENUMERATORS       (0x4)
#define GNU_RETURN_ON_ERROR         (0x8)
#define GNU_COMMAND_FAILED          (0x10)
#define GNU_FROM_TTY_OFF            (0x20)
#define GNU_NO_READMEM              (0x40)
#define GNU_VAR_LENGTH_TYPECODE     (0x80)

/* re-establish crash's canonical boolean values regardless of prior definitions */
#undef TRUE
#undef FALSE

#define TRUE  (1)
#define FALSE (0)

#ifdef GDB_COMMON
/*
 * function prototypes required by modified gdb source files.
 */
int console(char *, ...);
int gdb_CRASHDEBUG(ulong);
int gdb_readmem_callback(ulong, void *, int, int);
void patch_load_module(struct objfile *objfile, struct minimal_symbol *msymbol);
int patch_kernel_symbol(struct gnu_request *);
struct syment *symbol_search(char *);
int gdb_line_number_callback(ulong, ulong, ulong);
int gdb_print_callback(ulong);
#endif

#ifndef GDB_COMMON
/*
 * WARNING: the following type codes are type_code enums from gdb/gdbtypes.h
 * (i.e. this copy must stay in sync with the bundled gdb's definition)
 */
enum type_code {
	TYPE_CODE_UNDEF,	/* Not used; catches errors */
	TYPE_CODE_PTR,		/* Pointer type */
	TYPE_CODE_ARRAY,	/* Array type with lower & upper bounds. */
	TYPE_CODE_STRUCT,	/* C struct or Pascal record */
	TYPE_CODE_UNION,	/* C union or Pascal variant part */
	TYPE_CODE_ENUM,		/* Enumeration type */
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6)
#if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6)
	TYPE_CODE_FLAGS,	/* Bit flags type */
#endif
	TYPE_CODE_FUNC,		/* Function type */
	TYPE_CODE_INT,		/* Integer type */

	/* Floating type.  This is *NOT* a complex type.  Beware, there
	   are parts of GDB which bogusly assume that TYPE_CODE_FLT can
	   mean complex.  */
	TYPE_CODE_FLT,

	/* Void type.  The length field specifies the length (probably
	   always one) which is used in pointer arithmetic involving
	   pointers to this type, but actually dereferencing such a
	   pointer is invalid; a void type has no length and no actual
	   representation in memory or registers.  A pointer to a void
	   type is a generic pointer.  */
	TYPE_CODE_VOID,

	TYPE_CODE_SET,		/* Pascal sets */
	TYPE_CODE_RANGE,	/* Range (integers within spec'd bounds) */
	/*
	 * NOTE: the remainder of the type codes are not list or used here...
	 */
	TYPE_CODE_BOOL = 20,
#endif
};

/* mirrors the kernel's PF_EXITING task flag value — NOTE(review): verify against target kernel */
#define PF_EXITING 0x00000004  /* getting shut down */

extern long _ZOMBIE_;
#define IS_ZOMBIE(task)    (task_state(task) & _ZOMBIE_)
#define IS_EXITING(task)   (task_flags(task) & PF_EXITING)

/*
 * ps command options.
 */
#define PS_BY_PID       (0x1)
#define PS_BY_TASK      (0x2)
#define PS_BY_CMD       (0x4)
#define PS_SHOW_ALL     (0x8)
#define PS_PPID_LIST   (0x10)
#define PS_CHILD_LIST  (0x20)
#define PS_KERNEL      (0x40)
#define PS_USER        (0x80)
#define PS_TIMES      (0x100)
#define PS_KSTACKP    (0x200)
#define PS_LAST_RUN   (0x400)
#define PS_ARGV_ENVP  (0x800)
#define PS_TGID_LIST (0x1000)
#define PS_RLIMIT    (0x2000)
#define PS_GROUP     (0x4000)
#define PS_BY_REGEX  (0x8000)
#define PS_NO_HEADER (0x10000)
#define PS_MSECS     (0x20000)
#define PS_SUMMARY   (0x40000)

/* options that cannot be combined with one another — presumably enforced by cmd_ps(); confirm there */
#define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT|PS_MSECS|PS_SUMMARY)

#define MAX_PS_ARGS (100)   /* maximum command-line specific requests */

/*
 * Parsed "ps" command-line arguments: up to MAX_PS_ARGS pid/task/comm/regex
 * selectors, each slot's kind recorded in type[].
 */
struct psinfo {
	int argc;				/* number of selectors filled in */
	ulong pid[MAX_PS_ARGS];
	int type[MAX_PS_ARGS];			/* presumably a PS_BY_* discriminator per slot — TODO confirm */
	ulong task[MAX_PS_ARGS];
	char comm[MAX_PS_ARGS][TASK_COMM_LEN+1];
	struct regex_data {
		char *pattern;			/* original pattern string (for PS_BY_REGEX) */
		regex_t regex;			/* compiled form of pattern */
	} regex_data[MAX_PS_ARGS];
	int regexs;				/* count of entries in regex_data[] */
	ulong *cpus;
};

#define IS_A_NUMBER(X)      (decimal(X, 0) || hexadecimal(X, 0))
#define AMBIGUOUS_NUMBER(X) (decimal(X, 0) && hexadecimal(X, 0))

#define is_mclx_compressed_dump(X)  (va_server_init((X), 0, 0, 0) == 0)

/* per-task memory statistics, filled in by get_task_mem_usage() */
struct task_mem_usage {
	ulong rss;
	ulong total_vm;
	double pct_physmem;	/* rss as a percentage of physical memory */
	ulong mm_struct_addr;
	ulong pgd_addr;
};

/*
 * Global data (global_data.c)
 */
extern FILE *fp;
extern struct program_context program_context, *pc;
extern struct task_table task_table, *tt;
extern struct kernel_table kernel_table, *kt;
extern struct command_table_entry linux_command_table[];
extern char *args[MAXARGS];
extern int argcnt;
extern int argerrs;
extern struct offset_table offset_table;
extern struct size_table size_table;
extern struct array_table array_table;
extern struct vm_table vm_table, *vt;
extern struct machdep_table *machdep;
extern struct symbol_table_data symbol_table_data, *st;
extern struct extension_table *extension_table;

/*
 * Generated in build_data.c
 */
extern char *build_command;
extern char *build_data;
extern char *build_target;
extern
char *build_version; extern char *compiler_version; /* * command prototypes */ void cmd_quit(void); /* main.c */ void cmd_mach(void); /* main.c */ void cmd_help(void); /* help.c */ void cmd_test(void); /* test.c */ void cmd_ascii(void); /* tools.c */ void cmd_set(void); /* tools.c */ void cmd_eval(void); /* tools.c */ void cmd_list(void); /* tools.c */ void cmd_tree(void); /* tools.c */ void cmd_template(void); /* tools.c */ void cmd_alias(void); /* cmdline.c */ void cmd_repeat(void); /* cmdline.c */ void cmd_rd(void); /* memory.c */ void cmd_wr(void); /* memory.c */ void cmd_ptov(void); /* memory.c */ void cmd_vtop(void); /* memory.c */ void cmd_vm(void); /* memory.c */ void cmd_ptob(void); /* memory.c */ void cmd_btop(void); /* memory.c */ void cmd_kmem(void); /* memory.c */ void cmd_search(void); /* memory.c */ void cmd_swap(void); /* memory.c */ void cmd_pte(void); /* memory.c */ void cmd_ps(void); /* task.c */ void cmd_task(void); /* task.c */ void cmd_foreach(void); /* task.c */ void cmd_runq(void); /* task.c */ void cmd_sig(void); /* task.c */ void cmd_bt(void); /* kernel.c */ void cmd_dis(void); /* kernel.c */ void cmd_mod(void); /* kernel.c */ void cmd_log(void); /* kernel.c */ void cmd_sys(void); /* kernel.c */ void cmd_irq(void); /* kernel.c */ void cmd_timer(void); /* kernel.c */ void cmd_waitq(void); /* kernel.c */ void cmd_sym(void); /* symbols.c */ void cmd_struct(void); /* symbols.c */ void cmd_union(void); /* symbols.c */ void cmd_pointer(void); /* symbols.c */ void cmd_whatis(void); /* symbols.c */ void cmd_p(void); /* symbols.c */ void cmd_mount(void); /* filesys.c */ void cmd_files(void); /* filesys.c */ void cmd_fuser(void); /* filesys.c */ void cmd_dev(void); /* dev.c */ void cmd_gdb(void); /* gdb_interface.c */ void cmd_net(void); /* net.c */ void cmd_extend(void); /* extensions.c */ #if defined(S390) || defined(S390X) void cmd_s390dbf(void); #endif void cmd_map(void); /* kvmdump.c */ void cmd_ipcs(void); /* ipcs.c */ /* * main.c */ void 
main_loop(void); void exec_command(void); struct command_table_entry *get_command_table_entry(char *); void program_usage(int); #define LONG_FORM (1) #define SHORT_FORM (0) void dump_program_context(void); void dump_build_data(void); #ifdef ARM #define machdep_init(X) arm_init(X) #endif #ifdef ARM64 #define machdep_init(X) arm64_init(X) #endif #ifdef X86 #define machdep_init(X) x86_init(X) #endif #ifdef ALPHA #define machdep_init(X) alpha_init(X) #endif #ifdef PPC #define machdep_init(X) ppc_init(X) #endif #ifdef IA64 #define machdep_init(X) ia64_init(X) #endif #ifdef S390 #define machdep_init(X) s390_init(X) #endif #ifdef S390X #define machdep_init(X) s390x_init(X) #endif #ifdef X86_64 #define machdep_init(X) x86_64_init(X) #endif #ifdef PPC64 #define machdep_init(X) ppc64_init(X) #endif #ifdef MIPS #define machdep_init(X) mips_init(X) #endif int clean_exit(int); int untrusted_file(FILE *, char *); char *readmem_function_name(void); char *writemem_function_name(void); /* * cmdline.c */ void restart(int); void alias_init(char *); struct alias_data *is_alias(char *); void deallocate_alias(char *); void cmdline_init(void); void set_command_prompt(char *); void exec_input_file(void); void process_command_line(void); void dump_history(void); void resolve_rc_cmd(char *, int); void dump_alias_data(void); int output_open(void); #define output_closed() (!output_open()) void close_output(void); int interruptible(void); int received_SIGINT(void); void debug_redirect(char *); int CRASHPAGER_valid(void); char *setup_scroll_command(void); int minimal_functions(char *); int is_args_input_file(struct command_table_entry *, struct args_input_file *); void exec_args_input_file(struct command_table_entry *, struct args_input_file *); /* * tools.c */ int __error(int, char *, ...); #define error __error /* avoid conflict with gdb error() */ int console(char *, ...); void create_console_device(char *); int console_off(void); int console_on(int); int console_verbatim(char *); int 
whitespace(int); int ascii(int); int ascii_string(char *); int printable_string(char *); char *clean_line(char *); char *strip_line_end(char *); char *strip_linefeeds(char *); char *strip_beginning_whitespace(char *); char *strip_ending_whitespace(char *); char *strip_ending_char(char *, char); char *strip_beginning_char(char *, char); char *strip_comma(char *); char *strip_hex(char *); char *upper_case(char *, char *); char *first_nonspace(char *); char *first_space(char *); char *replace_string(char *, char *, char); void string_insert(char *, char *); char *strstr_rightmost(char *, char *); char *null_first_space(char *); int parse_line(char *, char **); void print_verbatim(FILE *, char *); char *fixup_percent(char *); int can_eval(char *); ulong eval(char *, int, int *); ulonglong evall(char *, int, int *); int eval_common(char *, int, int *, struct number_option *); ulong htol(char *, int, int *); ulong dtol(char *, int, int *); unsigned int dtoi(char *, int, int *); ulong stol(char *, int, int *); ulonglong stoll(char *, int, int *); ulonglong htoll(char *, int, int *); ulonglong dtoll(char *, int, int *); int decimal(char *, int); int hexadecimal(char *, int); int hexadecimal_only(char *, int); ulong convert(char *, int, int *, ulong); void pad_line(FILE *, int, char); #define INDENT(x) pad_line(fp, x, ' ') char *mkstring(char *, int, ulong, const char *); #define MKSTR(X) ((const char *)(X)) int count_leading_spaces(char *); int count_chars(char *, char); long count_buffer_chars(char *, char, long); char *space(int); char *concat_args(char *, int, int); char *shift_string_left(char *, int); char *shift_string_right(char *, int); int bracketed(char *, char *, int); void backspace(int); int do_list(struct list_data *); int do_rdtree(struct tree_data *); int do_rbtree(struct tree_data *); int retrieve_list(ulong *, int); long power(long, int); long long ll_power(long long, long long); void hq_init(void); int hq_open(void); int hq_close(void); int 
hq_enter(ulong); int hq_entry_exists(ulong); int hq_is_open(void); int hq_is_inuse(void); long get_embedded(void); void dump_embedded(char *); char *ordinal(ulong, char *); char *first_nonspace(char *); void dump_hash_table(int); void dump_shared_bufs(void); void drop_core(char *); int extract_hex(char *, ulong *, char, ulong); int count_bits_int(int); int count_bits_long(ulong); int highest_bit_long(ulong); int lowest_bit_long(ulong); void buf_init(void); void sym_buf_init(void); void free_all_bufs(void); char *getbuf(long); void freebuf(char *); char *resizebuf(char *, long, long); char *strdupbuf(char *); #define GETBUF(X) getbuf((long)(X)) #define FREEBUF(X) freebuf((char *)(X)) #define RESIZEBUF(X,Y,Z) (X) = resizebuf((char *)(X), (long)(Y), (long)(Z)); #define STRDUPBUF(X) strdupbuf((char *)(X)) void sigsetup(int, void *, struct sigaction *, struct sigaction *); #define SIGACTION(s, h, a, o) sigsetup(s, h, a, o) char *convert_time(ulonglong, char *); void stall(ulong); char *pages_to_size(ulong, char *); int clean_arg(void); int empty_list(ulong); int machine_type(char *); int machine_type_mismatch(char *, char *, char *, ulong); void command_not_supported(void); void option_not_supported(int); void please_wait(char *); void please_wait_done(void); int pathcmp(char *, char *); int calculate(char *, ulong *, ulonglong *, ulong); int endian_mismatch(char *, char, ulong); uint16_t swap16(uint16_t, int); uint32_t swap32(uint32_t, int); ulong *get_cpumask_buf(void); int make_cpumask(char *, ulong *, int, int *); size_t strlcpy(char *, char *, size_t); struct rb_node *rb_first(struct rb_root *); struct rb_node *rb_parent(struct rb_node *, struct rb_node *); struct rb_node *rb_right(struct rb_node *, struct rb_node *); struct rb_node *rb_left(struct rb_node *, struct rb_node *); struct rb_node *rb_next(struct rb_node *); struct rb_node *rb_last(struct rb_root *); /* * symbols.c */ void symtab_init(void); char *check_specified_kernel_debug_file(void); void 
no_debugging_data(int); void get_text_init_space(void); int is_kernel_text(ulong); int is_kernel_data(ulong); int is_init_data(ulong value); int is_kernel_text_offset(ulong); int is_symbol_text(struct syment *); int is_rodata(ulong, struct syment **); int get_text_function_range(ulong, ulong *, ulong *); void datatype_init(void); struct syment *symbol_search(char *); struct syment *value_search(ulong, ulong *); struct syment *value_search_base_kernel(ulong, ulong *); struct syment *value_search_module(ulong, ulong *); struct syment *symbol_search_next(char *, struct syment *); ulong highest_bss_symbol(void); int in_ksymbol_range(ulong); int module_symbol(ulong, struct syment **, struct load_module **, char *, ulong); #define IS_MODULE_VADDR(X) \ (module_symbol((ulong)(X), NULL, NULL, NULL, *gdb_output_radix)) char *closest_symbol(ulong); ulong closest_symbol_value(ulong); #define SAME_FUNCTION(X,Y) (closest_symbol_value(X) == closest_symbol_value(Y)) void show_symbol(struct syment *, ulong, ulong); #define SHOW_LINENUM (0x1) #define SHOW_SECTION (0x2) #define SHOW_HEX_OFFS (0x4) #define SHOW_DEC_OFFS (0x8) #define SHOW_RADIX() (*gdb_output_radix == 16 ? 
SHOW_HEX_OFFS : SHOW_DEC_OFFS) #define SHOW_MODULE (0x10) int symbol_name_count(char *); int symbol_query(char *, char *, struct syment **); struct syment *next_symbol(char *, struct syment *); struct syment *prev_symbol(char *, struct syment *); void get_symbol_data(char *, long, void *); int try_get_symbol_data(char *, long, void *); char *value_to_symstr(ulong, char *, ulong); char *value_symbol(ulong); ulong symbol_value(char *); ulong symbol_value_module(char *, char *); struct syment *per_cpu_symbol_search(char *); int symbol_exists(char *s); int kernel_symbol_exists(char *s); struct syment *kernel_symbol_search(char *); int get_syment_array(char *, struct syment **, int); void set_temporary_radix(unsigned int, unsigned int *); void restore_current_radix(unsigned int); void dump_struct(char *, ulong, unsigned); void dump_struct_member(char *, ulong, unsigned); void dump_union(char *, ulong, unsigned); void store_module_symbols_v1(ulong, int); void store_module_symbols_v2(ulong, int); int is_datatype_command(void); int is_typedef(char *); int arg_to_datatype(char *, struct datatype_member *, ulong); void dump_symbol_table(void); void dump_struct_table(ulong); void dump_offset_table(char *, ulong); int is_elf_file(char *); int is_kernel(char *); int is_shared_object(char *); int file_elf_version(char *); int is_system_map(char *); int is_compressed_kernel(char *, char **); int select_namelist(char *); int get_array_length(char *, int *, long); int get_array_length_alt(char *, char *, int *, long); int builtin_array_length(char *, int, int *); char *get_line_number(ulong, char *, int); char *get_build_directory(char *); int datatype_exists(char *); int get_function_numargs(ulong); int is_module_name(char *, ulong *, struct load_module **); int is_module_address(ulong, char *); ulong lowest_module_address(void); ulong highest_module_address(void); int load_module_symbols(char *, char *, ulong); void delete_load_module(ulong); ulong gdb_load_module_callback(ulong, 
char *); char *load_module_filter(char *, int); #define LM_P_FILTER (1) #define LM_DIS_FILTER (2) long datatype_info(char *, char *, struct datatype_member *); int get_symbol_type(char *, char *, struct gnu_request *); int get_symbol_length(char *); int text_value_cache(ulong, uint32_t, uint32_t *); int text_value_cache_byte(ulong, unsigned char *); void dump_text_value_cache(int); void clear_text_value_cache(void); void dump_numargs_cache(void); int patch_kernel_symbol(struct gnu_request *); struct syment *generic_machdep_value_to_symbol(ulong, ulong *); long OFFSET_verify(long, char *, char *, int, char *); long SIZE_verify(long, char *, char *, int, char *); long OFFSET_option(long, long, char *, char *, int, char *, char *); long SIZE_option(long, long, char *, char *, int, char *, char *); void dump_trace(void **); int enumerator_value(char *, long *); int dump_enumerator_list(char *); struct load_module *init_module_function(ulong); struct struct_member_data { char *structure; char *member; long type; long unsigned_type; long length; long offset; long bitpos; long bitsize; }; int fill_struct_member_data(struct struct_member_data *); void parse_for_member_extended(struct datatype_member *, ulong); void add_to_downsized(char *); int is_downsized(char *); /* * memory.c */ void mem_init(void); void vm_init(void); int readmem(ulonglong, int, void *, long, char *, ulong); int writemem(ulonglong, int, void *, long, char *, ulong); int generic_verify_paddr(uint64_t); int read_dev_mem(int, void *, int, ulong, physaddr_t); int read_memory_device(int, void *, int, ulong, physaddr_t); int read_mclx_dumpfile(int, void *, int, ulong, physaddr_t); int read_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); int read_daemon(int, void *, int, ulong, physaddr_t); int write_dev_mem(int, void *, int, ulong, physaddr_t); int write_memory_device(int, void *, int, ulong, physaddr_t); int write_mclx_dumpfile(int, void *, int, ulong, physaddr_t); int write_lkcd_dumpfile(int, void *, 
int, ulong, physaddr_t); int write_daemon(int, void *, int, ulong, physaddr_t); int kvtop(struct task_context *, ulong, physaddr_t *, int); int uvtop(struct task_context *, ulong, physaddr_t *, int); void do_vtop(ulong, struct task_context *, ulong); void raw_stack_dump(ulong, ulong); void raw_data_dump(ulong, long, int); int accessible(ulong); ulong vm_area_dump(ulong, ulong, ulong, struct reference *); #define IN_TASK_VMA(TASK,VA) (vm_area_dump((TASK), UVADDR|VERIFY_ADDR, (VA), 0)) char *fill_vma_cache(ulong); void clear_vma_cache(void); void dump_vma_cache(ulong); int is_page_ptr(ulong, physaddr_t *); void dump_vm_table(int); int read_string(ulong, char *, int); void get_task_mem_usage(ulong, struct task_mem_usage *); char *get_memory_size(char *); uint64_t generic_memory_size(void); char *swap_location(ulonglong, char *); void clear_swap_info_cache(void); uint memory_page_size(void); void force_page_size(char *); ulong first_vmalloc_address(void); ulong last_vmalloc_address(void); int in_vmlist_segment(ulong); int phys_to_page(physaddr_t, ulong *); int generic_get_kvaddr_ranges(struct vaddr_range *); int l1_cache_size(void); int dumpfile_memory(int); #define DUMPFILE_MEM_USED (1) #define DUMPFILE_FREE_MEM (2) #define DUMPFILE_MEM_DUMP (3) #define DUMPFILE_ENVIRONMENT (4) uint64_t total_node_memory(void); int generic_is_kvaddr(ulong); int generic_is_uvaddr(ulong, struct task_context *); void fill_stackbuf(struct bt_info *); void alter_stackbuf(struct bt_info *); int vaddr_type(ulong, struct task_context *); char *format_stack_entry(struct bt_info *bt, char *, ulong, ulong); int in_user_stack(ulong, ulong); int dump_inode_page(ulong); /* * filesys.c */ void fd_init(void); void vfs_init(void); int is_a_tty(char *); int file_exists(char *, struct stat *); int file_readable(char *); int is_directory(char *); char *search_directory_tree(char *, char *, int); void open_tmpfile(void); void close_tmpfile(void); void open_tmpfile2(void); void set_tmpfile2(FILE *); void 
close_tmpfile2(void);
void open_files_dump(ulong, int, struct reference *);
void get_pathname(ulong, char *, int, int, ulong);
char *vfsmount_devname(ulong, char *, int);
ulong file_to_dentry(ulong);
ulong file_to_vfsmnt(ulong);
int get_proc_version(void);
int file_checksum(char *, long *);
void dump_filesys_table(int);
char *fill_file_cache(ulong);
void clear_file_cache(void);
char *fill_dentry_cache(ulong);
void clear_dentry_cache(void);
char *fill_inode_cache(ulong);
void clear_inode_cache(void);
int monitor_memory(long *, long *, long *, long *);
int is_readable(char *);

/* operation selectors — presumably passed as the int argument of do_radix_tree(); confirm in filesys.c */
#define RADIX_TREE_COUNT   (1)
#define RADIX_TREE_SEARCH  (2)
#define RADIX_TREE_DUMP    (3)
#define RADIX_TREE_GATHER  (4)
#define RADIX_TREE_DUMP_CB (5)

/* one index/value slot exchanged with do_radix_tree() */
struct radix_tree_pair {
	ulong index;
	void *value;
};
ulong do_radix_tree(ulong, int, struct radix_tree_pair *);

int file_dump(ulong, ulong, ulong, int, int);
/* flag bits — presumably for file_dump()'s flag argument; confirm in filesys.c */
#define DUMP_FULL_NAME      0x1
#define DUMP_INODE_ONLY     0x2
#define DUMP_DENTRY_ONLY    0x4
#define DUMP_EMPTY_FILE     0x8
#define DUMP_FILE_NRPAGES  0x10
#endif  /* !GDB_COMMON */

/* declared outside the !GDB_COMMON section: visible to both crash and gdb-side code */
int same_file(char *, char *);

#ifndef GDB_COMMON
int cleanup_memory_driver(void);

/*
 * help.c
 */
#define HELP_COLUMNS 5
#define START_OF_HELP_DATA(X)  "START_OF_HELP_DATA" X
#define END_OF_HELP_DATA       "END_OF_HELP_DATA"
void help_init(void);
void cmd_usage(char *, int);
void display_version(void);
void display_help_screen(char *);
/* per-architecture dispatch: dump_machdep_table() resolves to the one built-in machine handler */
#ifdef ARM
#define dump_machdep_table(X) arm_dump_machdep_table(X)
#endif
#ifdef ARM64
#define dump_machdep_table(X) arm64_dump_machdep_table(X)
#endif
#ifdef X86
#define dump_machdep_table(X) x86_dump_machdep_table(X)
#endif
#ifdef ALPHA
#define dump_machdep_table(X) alpha_dump_machdep_table(X)
#endif
#ifdef PPC
#define dump_machdep_table(X) ppc_dump_machdep_table(X)
#endif
#ifdef IA64
#define dump_machdep_table(X) ia64_dump_machdep_table(X)
#endif
#ifdef S390
#define dump_machdep_table(X) s390_dump_machdep_table(X)
#endif
#ifdef S390X
#define dump_machdep_table(X) s390x_dump_machdep_table(X)
#endif
#ifdef X86_64 #define dump_machdep_table(X) x86_64_dump_machdep_table(X) #endif #ifdef PPC64 #define dump_machdep_table(X) ppc64_dump_machdep_table(X) #endif #ifdef MIPS #define dump_machdep_table(X) mips_dump_machdep_table(X) #endif extern char *help_pointer[]; extern char *help_alias[]; extern char *help_ascii[]; extern char *help_bt[]; extern char *help_btop[]; extern char *help_dev[]; extern char *help_dis[]; extern char *help_eval[]; extern char *help_exit[]; extern char *help_extend[]; extern char *help_files[]; extern char *help_foreach[]; extern char *help_fuser[]; extern char *help_gdb[]; extern char *help_help[]; extern char *help_irq[]; extern char *help_kmem[]; extern char *help__list[]; extern char *help_tree[]; extern char *help_log[]; extern char *help_mach[]; extern char *help_mod[]; extern char *help_mount[]; extern char *help_net[]; extern char *help_p[]; extern char *help_ps[]; extern char *help_pte[]; extern char *help_ptob[]; extern char *help_ptov[]; extern char *help_quit[]; extern char *help_rd[]; extern char *help_repeat[]; extern char *help_runq[]; extern char *help_ipcs[]; extern char *help_search[]; extern char *help_set[]; extern char *help_sig[]; extern char *help_struct[]; extern char *help_swap[]; extern char *help_sym[]; extern char *help_sys[]; extern char *help_task[]; extern char *help_timer[]; extern char *help_union[]; extern char *help_vm[]; extern char *help_vtop[]; extern char *help_waitq[]; extern char *help_whatis[]; extern char *help_wr[]; #if defined(S390) || defined(S390X) extern char *help_s390dbf[]; #endif extern char *help_map[]; /* * task.c */ void task_init(void); int set_context(ulong, ulong); void show_context(struct task_context *); ulong pid_to_task(ulong); ulong task_to_pid(ulong); int task_exists(ulong); int is_kernel_thread(ulong); int is_idle_thread(ulong); void get_idle_threads(ulong *, int); char *task_state_string(ulong, char *, int); ulong task_flags(ulong); ulong task_state(ulong); ulong task_mm(ulong, 
int); ulong task_tgid(ulong); ulonglong task_last_run(ulong); ulong vaddr_in_task_struct(ulong); int comm_exists(char *); struct task_context *task_to_context(ulong); struct task_context *pid_to_context(ulong); struct task_context *tgid_to_context(ulong); ulong stkptr_to_task(ulong); ulong task_to_thread_info(ulong); ulong task_to_stackbase(ulong); int str_to_context(char *, ulong *, struct task_context **); #define STR_PID (0x1) #define STR_TASK (0x2) #define STR_INVALID (0x4) char *get_panicmsg(char *); char *task_cpu(int, char *, int); void print_task_header(FILE *, struct task_context *, int); ulong get_active_task(int); int is_task_active(ulong); int is_panic_thread(ulong); int get_panic_ksp(struct bt_info *, ulong *); void foreach(struct foreach_data *); int pid_exists(ulong); #define TASKS_PER_PID(x) pid_exists(x) char *fill_task_struct(ulong); #define IS_LAST_TASK_READ(task) ((ulong)(task) == tt->last_task_read) char *fill_thread_info(ulong); #define IS_LAST_THREAD_INFO_READ(ti) ((ulong)(ti) == tt->last_thread_info_read) char *fill_mm_struct(ulong); #define IS_LAST_MM_READ(mm) ((ulong)(mm) == tt->last_mm_read) void do_task(ulong, ulong, struct reference *, unsigned int); void clear_task_cache(void); int get_active_set(void); void clear_active_set(void); void do_sig(ulong, ulong, struct reference *); void modify_signame(int, char *, char *); ulong generic_get_stackbase(ulong); ulong generic_get_stacktop(ulong); void dump_task_table(int); void sort_context_array(void); void sort_tgid_array(void); int sort_by_tgid(const void *, const void *); int in_irq_ctx(ulonglong, int, ulong); /* * extensions.c */ void register_extension(struct command_table_entry *); void dump_extension_table(int); void load_extension(char *); void unload_extension(char *); void preload_extensions(void); /* Hooks for sial */ unsigned long get_curtask(void); char *crash_global_cmd(void); struct command_table_entry *crash_cmd_table(void); /* * kernel.c */ void kernel_init(void); void 
module_init(void); void verify_version(void); void verify_spinlock(void); void non_matching_kernel(void); struct load_module *modref_to_load_module(char *); int load_module_symbols_helper(char *); void unlink_module(struct load_module *); int check_specified_module_tree(char *, char *); int is_system_call(char *, ulong); void generic_dump_irq(int); void generic_get_irq_affinity(int); void generic_show_interrupts(int, ulong *); int generic_dis_filter(ulong, char *, unsigned int); int kernel_BUG_encoding_bytes(void); void display_sys_stats(void); char *get_uptime(char *, ulonglong *); void clone_bt_info(struct bt_info *, struct bt_info *, struct task_context *); void dump_kernel_table(int); void dump_bt_info(struct bt_info *, char *where); void dump_log(int); #define SHOW_LOG_LEVEL (0x1) #define SHOW_LOG_DICT (0x2) #define SHOW_LOG_TEXT (0x4) void set_cpu(int); void clear_machdep_cache(void); struct stack_hook *gather_text_list(struct bt_info *); int get_cpus_online(void); int get_cpus_active(void); int get_cpus_present(void); int get_cpus_possible(void); int check_offline_cpu(int); int hide_offline_cpu(int); int get_highest_cpu_online(void); int get_highest_cpu_present(void); int get_cpus_to_display(void); void get_log_from_vmcoreinfo(char *file); int in_cpu_map(int, int); void paravirt_init(void); void print_stack_text_syms(struct bt_info *, ulong, ulong); void back_trace(struct bt_info *); int in_alternate_stack(int, ulong); ulong cpu_map_addr(const char *type); #define BT_RAW (0x1ULL) #define BT_SYMBOLIC_ARGS (0x2ULL) #define BT_FULL (0x4ULL) #define BT_TEXT_SYMBOLS (0x8ULL) #define BT_TEXT_SYMBOLS_PRINT (0x10ULL) #define BT_TEXT_SYMBOLS_NOPRINT (0x20ULL) #define BT_USE_GDB (0x40ULL) #define BT_EXCEPTION_FRAME (0x80ULL) #define BT_LINE_NUMBERS (0x100ULL) #define BT_USER_EFRAME (0x200ULL) #define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) #define BT_SAVE_LASTSP (0x400ULL) #define BT_FROM_EXCEPTION (0x800ULL) #define BT_FROM_CALLFRAME (0x1000ULL) #define 
BT_EFRAME_SEARCH (0x2000ULL) #define BT_SPECULATE (0x4000ULL) #define BT_FRAMESIZE_DISABLE (BT_SPECULATE) #define BT_RESCHEDULE (0x8000ULL) #define BT_SCHEDULE (BT_RESCHEDULE) #define BT_RET_FROM_SMP_FORK (0x10000ULL) #define BT_STRACE (0x20000ULL) #define BT_KDUMP_ADJUST (BT_STRACE) #define BT_KSTACKP (0x40000ULL) #define BT_LOOP_TRAP (0x80000ULL) #define BT_BUMP_FRAME_LEVEL (0x100000ULL) #define BT_EFRAME_COUNT (0x200000ULL) #define BT_CPU_IDLE (0x400000ULL) #define BT_WRAP_TRAP (0x800000ULL) #define BT_KERNEL_THREAD (0x1000000ULL) #define BT_ERROR_MASK (BT_LOOP_TRAP|BT_WRAP_TRAP|BT_KERNEL_THREAD|BT_CPU_IDLE) #define BT_UNWIND_ERROR (0x2000000ULL) #define BT_OLD_BACK_TRACE (0x4000000ULL) #define BT_FRAMESIZE_DEBUG (0x8000000ULL) #define BT_CONTEXT_SWITCH (0x10000000ULL) #define BT_HARDIRQ (0x20000000ULL) #define BT_SOFTIRQ (0x40000000ULL) #define BT_CHECK_CALLER (0x80000000ULL) #define BT_NO_CHECK_CALLER (0x100000000ULL) #define BT_EXCEPTION_STACK (0x200000000ULL) #define BT_IRQSTACK (0x400000000ULL) #define BT_DUMPFILE_SEARCH (0x800000000ULL) #define BT_EFRAME_SEARCH2 (0x1000000000ULL) #define BT_START (0x2000000000ULL) #define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) #define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) #define BT_THREAD_GROUP (0x10000000000ULL) #define BT_SAVE_EFRAME_IP (0x20000000000ULL) #define BT_FULL_SYM_SLAB (0x40000000000ULL) #define BT_KDUMP_ELF_REGS (0x80000000000ULL) #define BT_USER_SPACE (0x100000000000ULL) #define BT_KERNEL_SPACE (0x200000000000ULL) #define BT_FULL_SYM_SLAB2 (0x400000000000ULL) #define BT_EFRAME_TARGET (0x800000000000ULL) #define BT_CPUMASK (0x1000000000000ULL) #define BT_SHOW_ALL_REGS (0x2000000000000ULL) #define BT_REGS_NOT_FOUND (0x4000000000000ULL) #define BT_SYMBOL_OFFSET (BT_SYMBOLIC_ARGS) #define BT_REF_HEXVAL (0x1) #define BT_REF_SYMBOL (0x2) #define BT_REF_FOUND (0x4) #define BT_REFERENCE_CHECK(X) ((X)->ref) #define BT_REFERENCE_FOUND(X) ((X)->ref && ((X)->ref->cmdflags & BT_REF_FOUND)) #define NO_MODULES() \ 
(!kt->module_list || (kt->module_list == kt->kernel_module)) #define USER_EFRAME_ADDR(task) \ ((ulong)task + UNION_SIZE("task_union") - SIZE(pt_regs)) struct remote_file { char *filename; char *local; int fd; int flags; int type; long csum; off_t size; }; #define REMOTE_VERBOSE (O_RDWR << 1) #define REMOTE_COPY_DONE (REMOTE_VERBOSE << 1) #define TYPE_ELF (REMOTE_VERBOSE << 2) #define TYPE_DEVMEM (REMOTE_VERBOSE << 3) #define TYPE_MCLXCD (REMOTE_VERBOSE << 4) #define TYPE_LKCD (REMOTE_VERBOSE << 5) #define TYPE_S390D (REMOTE_VERBOSE << 6) #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) ulonglong xen_m2p(ulonglong); void read_in_kernel_config(int); #define IKCFG_INIT (0) #define IKCFG_READ (1) #define IKCFG_SETUP (2) #define IKCFG_FREE (3) int get_kernel_config(char *, char **); enum { IKCONFIG_N, IKCONFIG_Y, IKCONFIG_M, IKCONFIG_STR, }; #define MAGIC_START "IKCFG_ST" #define MAGIC_END "IKCFG_ED" #define MAGIC_SIZE (sizeof(MAGIC_START) - 1) /* * dev.c */ void dev_init(void); void dump_dev_table(void); #ifdef ARM void arm_init(int); void arm_dump_machdep_table(ulong); int arm_is_vmalloc_addr(ulong); void arm_dump_backtrace_entry(struct bt_info *, int, ulong, ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to ARM architecture\n") struct arm_pt_regs { ulong uregs[18]; }; #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] #define ARM_lr uregs[14] #define ARM_sp uregs[13] #define ARM_ip uregs[12] #define ARM_fp uregs[11] #define ARM_r10 uregs[10] #define ARM_r9 uregs[9] #define ARM_r8 uregs[8] #define ARM_r7 uregs[7] #define ARM_r6 uregs[6] #define ARM_r5 uregs[5] #define ARM_r4 uregs[4] #define ARM_r3 uregs[3] #define ARM_r2 uregs[2] #define ARM_r1 uregs[1] #define ARM_r0 uregs[0] #define ARM_ORIG_r0 uregs[17] #define KSYMS_START (0x1) #define PHYS_BASE (0x2) #define PGTABLE_V2 (0x4) #define IDMAP_PGD (0x8) #define KVBASE_MASK (0x1ffffff) struct machine_specific { ulong phys_base; ulong vmalloc_start_addr; ulong modules_vaddr; ulong 
modules_end; ulong kernel_text_start; ulong kernel_text_end; ulong exception_text_start; ulong exception_text_end; ulonglong last_pgd_read_lpae; ulonglong last_pmd_read_lpae; ulonglong last_ptbl_read_lpae; struct arm_pt_regs *crash_task_regs; int unwind_index_prel31; }; int init_unwind_tables(void); void unwind_backtrace(struct bt_info *); #endif /* ARM */ /* * arm64.c */ #ifdef ARM64 void arm64_init(int); void arm64_dump_machdep_table(ulong); int arm64_IS_VMALLOC_ADDR(ulong); ulong arm64_swp_type(ulong); ulong arm64_swp_offset(ulong); #endif /* * alpha.c */ #ifdef ALPHA void alpha_init(int); void alpha_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to alpha architecture\n") #define HWRESET_TASK(X) ((machdep->flags & HWRESET) && is_task_active(X) && \ (task_to_context(X)->processor == 0)) #endif /* * x86.c */ #ifdef X86 void x86_init(int); void x86_dump_machdep_table(ulong); void x86_display_idt_table(void); #define display_idt_table() x86_display_idt_table() #define KSYMS_START (0x1) void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); char *x86_function_called_by(ulong); struct syment *x86_jmp_error_code(ulong); struct syment *x86_text_lock_jmp(ulong, ulong *); struct machine_specific { ulong *idt_table; ulong entry_tramp_start; ulong entry_tramp_end; physaddr_t entry_tramp_start_phys; ulonglong last_pmd_read_PAE; ulonglong last_ptbl_read_PAE; ulong page_protnone; int max_numnodes; ulong *remap_start_vaddr; ulong *remap_end_vaddr; ulong *remap_start_pfn; }; struct syment *x86_is_entry_tramp_address(ulong, ulong *); #endif /* * x86_64.c */ #ifdef X86_64 void x86_64_init(int); void x86_64_dump_machdep_table(ulong); ulong x86_64_PTOV(ulong); ulong x86_64_VTOP(ulong); int x86_64_IS_VMALLOC_ADDR(ulong); void x86_64_display_idt_table(void); #define display_idt_table() x86_64_display_idt_table() long x86_64_exception_frame(ulong, ulong, char *, struct bt_info *, FILE *); #define EFRAME_INIT (0) struct 
x86_64_pt_regs_offsets { long r15; long r14; long r13; long r12; long rbp; long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ long r11; long r10; long r9; long r8; long rax; long rcx; long rdx; long rsi; long rdi; long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ long rip; long cs; long eflags; long rsp; long ss; }; #define MAX_EXCEPTION_STACKS 7 #define NMI_STACK (machdep->machspec->stkinfo.NMI_stack_index) struct x86_64_stkinfo { ulong ebase[NR_CPUS][MAX_EXCEPTION_STACKS]; int esize[MAX_EXCEPTION_STACKS]; ulong ibase[NR_CPUS]; int isize; int NMI_stack_index; char *exception_stacks[MAX_EXCEPTION_STACKS]; }; struct machine_specific { ulong userspace_top; ulong page_offset; ulong vmalloc_start_addr; ulong vmalloc_end; ulong vmemmap_vaddr; ulong vmemmap_end; ulong modules_vaddr; ulong modules_end; ulong phys_base; char *pml4; char *upml; ulong last_upml_read; ulong last_pml4_read; char *irqstack; ulong irq_eframe_link; struct x86_64_pt_regs_offsets pto; struct x86_64_stkinfo stkinfo; ulong *current; ulong *crash_nmi_rsp; ulong vsyscall_page; ulong thread_return; ulong page_protnone; ulong GART_start; ulong GART_end; }; #define KSYMS_START (0x1) #define PT_REGS_INIT (0x2) #define VM_ORIG (0x4) #define VM_2_6_11 (0x8) #define VM_XEN (0x10) #define NO_TSS (0x20) #define SCHED_TEXT (0x40) #define PHYS_BASE (0x80) #define VM_XEN_RHEL4 (0x100) #define FRAMEPOINTER (0x200) #define GART_REGION (0x400) #define NESTED_NMI (0x800) #define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4) #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) #endif #if defined(X86) || defined(X86_64) /* * unwind_x86_32_64.c */ void init_unwind_table(void); int dwarf_backtrace(struct bt_info *, int, ulong); void dwarf_debug(struct bt_info *); int dwarf_print_stack_entry(struct bt_info *, int); #endif /* * ppc64.c */ /* * This structure was copied from kernel source * in include/asm-ppc/ptrace.h */ struct ppc64_pt_regs { long gpr[32]; long nip; long 
msr; long orig_gpr3; /* Used for restarting system calls */ long ctr; long link; long xer; long ccr; long mq; /* 601 only (not used at present) */ /* Used on APUS to hold IPL value. */ long trap; /* Reason for being here */ long dar; /* Fault registers */ long dsisr; long result; /* Result of a system call */ }; struct ppc64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct ppc64_elf_prstatus { struct ppc64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; struct ppc64_pt_regs pr_reg; int pr_fpvalid; }; #ifdef PPC64 struct ppc64_vmemmap { unsigned long phys; unsigned long virt; }; /* * Used to store the HW interrupt stack. It is only for 2.4. */ struct machine_specific { ulong hwintrstack[NR_CPUS]; char *hwstackbuf; uint hwstacksize; char *level4; ulong last_level4_read; uint l4_index_size; uint l3_index_size; uint l2_index_size; uint l1_index_size; uint ptrs_per_l3; uint ptrs_per_l2; uint ptrs_per_l1; uint l4_shift; uint l3_shift; uint l2_shift; uint l1_shift; uint pte_shift; uint l2_masked_bits; int vmemmap_cnt; int vmemmap_psize; ulong vmemmap_base; struct ppc64_vmemmap *vmemmap_list; ulong _page_present; ulong _page_user; ulong _page_rw; ulong _page_guarded; ulong _page_coherent; ulong _page_no_cache; ulong _page_writethru; ulong _page_dirty; ulong _page_accessed; int (*is_kvaddr)(ulong); int (*is_vmaddr)(ulong); }; #define IS_LAST_L4_READ(l4) ((ulong)(l4) == machdep->machspec->last_level4_read) #define FILL_L4(L4, TYPE, SIZE) \ if (!IS_LAST_L4_READ(L4)) { \ readmem((ulonglong)((ulong)(L4)), TYPE, machdep->machspec->level4, \ SIZE, "level4 page", FAULT_ON_ERROR); \ machdep->machspec->last_level4_read = (ulong)(L4); \ } void ppc64_init(int); void ppc64_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable 
to PowerPC architecture\n") #define KSYMS_START (0x1) #define VM_ORIG (0x2) #define VMEMMAP_AWARE (0x4) #define BOOK3E (0x8) #define REGION_SHIFT (60UL) #define REGION_ID(addr) (((unsigned long)(addr)) >> REGION_SHIFT) #define VMEMMAP_REGION_ID (0xfUL) #endif /* * ppc.c */ #ifdef PPC void ppc_init(int); void ppc_dump_machdep_table(ulong); void ppc_relocate_nt_prstatus_percpu(void **, uint *); #define display_idt_table() \ error(FATAL, "-d option is not applicable to PowerPC architecture\n") #define KSYMS_START (0x1) /* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */ #define CPU_BOOKE (0x00008000) #else #define ppc_relocate_nt_prstatus_percpu(X,Y) do {} while (0) #endif /* * lkcd_fix_mem.c */ struct _dump_header_asm_s; struct _dump_header_s; ulong get_lkcd_switch_stack(ulong); int fix_addr_v8(struct _dump_header_asm_s *); int lkcd_dump_init_v8_arch(struct _dump_header_s *dh); int fix_addr_v7(int); int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp); int lkcd_get_kernel_start_v8(ulong *addr); /* * lkcd_v8.c */ int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp); /* * ia64.c */ #ifdef IA64 void ia64_init(int); void ia64_dump_machdep_table(ulong); void ia64_dump_line_number(ulong); ulong ia64_get_switch_stack(ulong); void ia64_exception_frame(ulong, struct bt_info *bt); ulong ia64_PTOV(ulong); ulong ia64_VTOP(ulong); int ia64_IS_VMALLOC_ADDR(ulong); #define display_idt_table() \ error(FATAL, "-d option TBD on ia64 architecture\n"); int ia64_in_init_stack(ulong addr); int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt); physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo); #define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ #define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ #define NEW_UNW_V1 (0x4) #define NEW_UNW_V2 (0x8) #define NEW_UNW_V3 (0x10) #define UNW_OUT_OF_SYNC (0x20) /* shared data structures out of sync */ #define UNW_READ (0x40) /* 
kernel unw has been read successfully */ #define MEM_LIMIT (0x80) #define UNW_PTREGS (0x100) #define UNW_R0 (0x200) #undef IA64_RBS_OFFSET #undef IA64_STK_OFFSET #define IA64_RBS_OFFSET ((SIZE(task_struct) + 15) & ~15) #define IA64_STK_OFFSET (STACKSIZE()) struct machine_specific { ulong cpu_data_address; ulong unimpl_va_mask; ulong unimpl_pa_mask; long unw_tables_offset; long unw_kernel_table_offset; long unw_pt_regs_offsets; int script_index; struct unw_script *script_cache; ulong script_cache_fills; ulong script_cache_hits; void *unw; ulong mem_limit; ulong kernel_region; ulong kernel_start; ulong phys_start; ulong vmalloc_start; char *ia64_memmap; uint64_t efi_memmap_size; uint64_t efi_memdesc_size; void (*unwind_init)(void); void (*unwind)(struct bt_info *); void (*dump_unwind_stats)(void); int (*unwind_debug)(ulong); int ia64_init_stack_size; }; /* * unwind.c */ void unwind_init_v1(void); void unwind_v1(struct bt_info *); void dump_unwind_stats_v1(void); int unwind_debug_v1(ulong); void unwind_init_v2(void); void unwind_v2(struct bt_info *); void dump_unwind_stats_v2(void); int unwind_debug_v2(ulong); void unwind_init_v3(void); void unwind_v3(struct bt_info *); void dump_unwind_stats_v3(void); int unwind_debug_v3(ulong); #endif /* IA64 */ /* * s390.c */ #ifdef S390 void s390_init(int); void s390_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to S390 architecture\n") #define KSYMS_START (0x1) #endif /* * s390_dump.c */ int is_s390_dump(char *); FILE* s390_dump_init(char *); int read_s390_dumpfile(int, void *, int, ulong, physaddr_t); int write_s390_dumpfile(int, void *, int, ulong, physaddr_t); uint s390_page_size(void); int s390_memory_used(void); int s390_free_memory(void); int s390_memory_dump(FILE *); ulong get_s390_panic_task(void); void get_s390_panicmsg(char *); /* * s390x.c */ #ifdef S390X void s390x_init(int); void s390x_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option 
is not applicable to S390X architecture\n") #define KSYMS_START (0x1) #endif #ifdef MIPS void mips_init(int); void mips_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to MIPS architecture\n") struct mips_regset { ulong regs[45]; }; struct mips_pt_regs_main { ulong regs[32]; ulong cp0_status; ulong hi; ulong lo; }; struct mips_pt_regs_cp0 { ulong cp0_badvaddr; ulong cp0_cause; ulong cp0_epc; }; #define KSYMS_START (0x1) #define PHYS_BASE (0x2) #define KVBASE_MASK (0x1ffffff) struct machine_specific { ulong phys_base; ulong vmalloc_start_addr; ulong modules_vaddr; ulong modules_end; ulong _page_present; ulong _page_read; ulong _page_write; ulong _page_accessed; ulong _page_modified; ulong _page_global; ulong _page_valid; ulong _page_no_read; ulong _page_no_exec; ulong _page_dirty; ulong _pfn_shift; #define _PAGE_PRESENT (machdep->machspec->_page_present) #define _PAGE_READ (machdep->machspec->_page_read) #define _PAGE_WRITE (machdep->machspec->_page_write) #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) #define _PAGE_MODIFIED (machdep->machspec->_page_modified) #define _PAGE_GLOBAL (machdep->machspec->_page_global) #define _PAGE_VALID (machdep->machspec->_page_valid) #define _PAGE_NO_READ (machdep->machspec->_page_no_read) #define _PAGE_NO_EXEC (machdep->machspec->_page_no_exec) #define _PAGE_DIRTY (machdep->machspec->_page_dirty) #define _PFN_SHIFT (machdep->machspec->_pfn_shift) }; #endif /* MIPS */ /* * netdump.c */ int is_netdump(char *, ulong); uint netdump_page_size(void); int read_netdump(int, void *, int, ulong, physaddr_t); int write_netdump(int, void *, int, ulong, physaddr_t); int netdump_free_memory(void); int netdump_memory_used(void); int netdump_init(char *, FILE *); ulong get_netdump_panic_task(void); ulong get_netdump_switch_stack(ulong); FILE *set_netdump_fp(FILE *); int netdump_memory_dump(FILE *); void get_netdump_regs(struct bt_info *, ulong *, ulong *); int is_partial_netdump(void); 
void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); void dump_registers_for_elf_dumpfiles(void); struct vmcore_data; struct vmcore_data *get_kdump_vmcore_data(void); int read_kdump(int, void *, int, ulong, physaddr_t); int write_kdump(int, void *, int, ulong, physaddr_t); int is_kdump(char *, ulong); int kdump_init(char *, FILE *); ulong get_kdump_panic_task(void); uint kdump_page_size(void); int kdump_free_memory(void); int kdump_memory_used(void); int kdump_memory_dump(FILE *); void get_kdump_regs(struct bt_info *, ulong *, ulong *); void xen_kdump_p2m_mfn(char *); int is_sadump_xen(void); void set_xen_phys_start(char *); ulong xen_phys_start(void); int xen_major_version(void); int xen_minor_version(void); int get_netdump_arch(void); int exist_regs_in_elf_notes(struct task_context *); void *get_regs_from_elf_notes(struct task_context *); void map_cpus_to_prstatus(void); int arm_kdump_phys_base(ulong *); int is_proc_kcore(char *, ulong); int proc_kcore_init(FILE *); int read_proc_kcore(int, void *, int, ulong, physaddr_t); int write_proc_kcore(int, void *, int, ulong, physaddr_t); int kcore_memory_dump(FILE *); void dump_registers_for_qemu_mem_dump(void); void kdump_backup_region_init(void); void display_regs_from_elf_notes(int, FILE *); void display_ELF_note(int, int, void *, FILE *); void *netdump_get_prstatus_percpu(int); #define PRSTATUS_NOTE (1) #define QEMU_NOTE (2) /* * ramdump.c */ int is_ramdump(char *pattern); char *ramdump_to_elf(void); void ramdump_elf_output_file(char *opt); void ramdump_cleanup(void); int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr); void show_ramdump_files(void); void dump_ramdump_data(void); int is_ramdump_image(void); /* * diskdump.c */ int is_diskdump(char *); uint diskdump_page_size(void); int read_diskdump(int, void *, int, ulong, physaddr_t); int write_diskdump(int, void *, int, ulong, physaddr_t); int 
diskdump_free_memory(void); int diskdump_memory_used(void); int diskdump_init(char *, FILE *); ulong get_diskdump_panic_task(void); ulong get_diskdump_switch_stack(ulong); int diskdump_memory_dump(FILE *); FILE *set_diskdump_fp(FILE *); void get_diskdump_regs(struct bt_info *, ulong *, ulong *); int diskdump_phys_base(unsigned long *); ulong *diskdump_flags; int is_partial_diskdump(void); int dumpfile_is_split(void); void show_split_dumpfiles(void); void x86_process_elf_notes(void *, unsigned long); void *diskdump_get_prstatus_percpu(int); void map_cpus_to_prstatus_kdump_cmprs(void); void diskdump_display_regs(int, FILE *); void process_elf32_notes(void *, ulong); void process_elf64_notes(void *, ulong); void dump_registers_for_compressed_kdump(void); /* * makedumpfile.c */ void check_flattened_format(char *file); int is_flattened_format(char *file); int read_flattened_format(int fd, off_t offset, void *buf, size_t size); void dump_flat_header(FILE *); /* * xendump.c */ int is_xendump(char *); int read_xendump(int, void *, int, ulong, physaddr_t); int write_xendump(int, void *, int, ulong, physaddr_t); uint xendump_page_size(void); int xendump_free_memory(void); int xendump_memory_used(void); int xendump_init(char *, FILE *); int xendump_memory_dump(FILE *); ulong get_xendump_panic_task(void); void get_xendump_regs(struct bt_info *, ulong *, ulong *); char *xc_core_mfn_to_page(ulong, char *); int xc_core_mfn_to_page_index(ulong); void xendump_panic_hook(char *); int read_xendump_hyper(int, void *, int, ulong, physaddr_t); struct xendump_data *get_xendump_data(void); /* * kvmdump.c */ int is_kvmdump(char *); int is_kvmdump_mapfile(char *); int kvmdump_init(char *, FILE *); int read_kvmdump(int, void *, int, ulong, physaddr_t); int write_kvmdump(int, void *, int, ulong, physaddr_t); int kvmdump_free_memory(void); int kvmdump_memory_used(void); int kvmdump_memory_dump(FILE *); void get_kvmdump_regs(struct bt_info *, ulong *, ulong *); ulong 
get_kvmdump_panic_task(void); int kvmdump_phys_base(unsigned long *); void kvmdump_display_regs(int, FILE *); void set_kvmhost_type(char *); void set_kvm_iohole(char *); struct kvm_register_set { union { uint32_t cs; uint32_t ss; uint32_t ds; uint32_t es; uint32_t fs; uint32_t gs; uint64_t ip; uint64_t flags; uint64_t regs[16]; } x86; }; int get_kvm_register_set(int, struct kvm_register_set *); /* * sadump.c */ int is_sadump(char *); uint sadump_page_size(void); int read_sadump(int, void *, int, ulong, physaddr_t); int write_sadump(int, void *, int, ulong, physaddr_t); int sadump_init(char *, FILE *); int sadump_is_diskset(void); ulong get_sadump_panic_task(void); ulong get_sadump_switch_stack(ulong); int sadump_memory_used(void); int sadump_free_memory(void); int sadump_memory_dump(FILE *); FILE *set_sadump_fp(FILE *); void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp); void sadump_display_regs(int, FILE *); int sadump_phys_base(ulong *); void sadump_show_diskset(void); int sadump_is_zero_excluded(void); void sadump_set_zero_excluded(void); void sadump_unset_zero_excluded(void); struct sadump_data; struct sadump_data *get_sadump_data(void); /* * qemu.c */ int qemu_init(char *); /* * qemu-load.c */ int is_qemu_vm_file(char *); void dump_qemu_header(FILE *); /* * net.c */ void net_init(void); void dump_net_table(void); void dump_sockets_workhorse(ulong, ulong, struct reference *); /* * remote.c */ int is_remote_daemon(char *); physaddr_t get_remote_phys_base(physaddr_t, physaddr_t); physaddr_t remote_vtop(int, physaddr_t); int get_remote_regs(struct bt_info *, ulong *, ulong *); physaddr_t get_remote_cr3(int); void remote_fd_init(void); int get_remote_file(struct remote_file *); uint remote_page_size(void); int find_remote_module_objfile(struct load_module *lm, char *, char *); int remote_free_memory(void); int remote_memory_dump(int); int remote_memory_used(void); void remote_exit(void); int remote_execute(void); void remote_clear_pipeline(void); int 
remote_memory_read(int, char *, int, physaddr_t, int); /* * vmware_vmss.c */ int is_vmware_vmss(char *filename); int vmware_vmss_init(char *filename, FILE *ofp); uint vmware_vmss_page_size(void); int read_vmware_vmss(int, void *, int, ulong, physaddr_t); int write_vmware_vmss(int, void *, int, ulong, physaddr_t); /* * gnu_binutils.c */ /* NO LONGER IN USE */ /* * test.c */ void cmd_template(void); void foreach_test(ulong, ulong); /* * va_server.c */ int mclx_page_size(void); int vas_memory_used(void); int vas_memory_dump(FILE *); int vas_free_memory(char *); void set_vas_debug(ulong); size_t vas_write(void *, size_t); int va_server_init(char *, ulong *, ulong *, ulong *); size_t vas_read(void *, size_t); int vas_lseek(ulong, int); /* * lkcd_x86_trace.c */ int lkcd_x86_back_trace(struct bt_info *, int, FILE *); /* * lkcd_common.c */ int lkcd_dump_init(FILE *, int, char *); ulong get_lkcd_panic_task(void); void get_lkcd_panicmsg(char *); int is_lkcd_compressed_dump(char *); void dump_lkcd_environment(ulong); int lkcd_lseek(physaddr_t); long lkcd_read(void *, long); void set_lkcd_debug(ulong); FILE *set_lkcd_fp(FILE *); uint lkcd_page_size(void); int lkcd_memory_used(void); int lkcd_memory_dump(FILE *); int lkcd_free_memory(void); void lkcd_print(char *, ...); void set_remote_lkcd_panic_data(ulong, char *); void set_lkcd_nohash(void); int lkcd_load_dump_page_header(void *, ulong); void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); int set_mb_benchmark(ulong); ulonglong fix_lkcd_address(ulonglong); int lkcd_get_kernel_start(ulong *addr); int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp); /* * lkcd_v1.c */ int lkcd_dump_init_v1(FILE *, int); void dump_dump_page_v1(char *, void *); void dump_lkcd_environment_v1(ulong); uint32_t get_dp_size_v1(void); uint32_t get_dp_flags_v1(void); uint64_t get_dp_address_v1(void); /* * lkcd_v2_v3.c */ int lkcd_dump_init_v2_v3(FILE *, int); void dump_dump_page_v2_v3(char *, void *); void 
dump_lkcd_environment_v2_v3(ulong); uint32_t get_dp_size_v2_v3(void); uint32_t get_dp_flags_v2_v3(void); uint64_t get_dp_address_v2_v3(void); /* * lkcd_v5.c */ int lkcd_dump_init_v5(FILE *, int); void dump_dump_page_v5(char *, void *); void dump_lkcd_environment_v5(ulong); uint32_t get_dp_size_v5(void); uint32_t get_dp_flags_v5(void); uint64_t get_dp_address_v5(void); /* * lkcd_v7.c */ int lkcd_dump_init_v7(FILE *, int, char *); void dump_dump_page_v7(char *, void *); void dump_lkcd_environment_v7(ulong); uint32_t get_dp_size_v7(void); uint32_t get_dp_flags_v7(void); uint64_t get_dp_address_v7(void); /* * lkcd_v8.c */ int lkcd_dump_init_v8(FILE *, int, char *); void dump_dump_page_v8(char *, void *); void dump_lkcd_environment_v8(ulong); uint32_t get_dp_size_v8(void); uint32_t get_dp_flags_v8(void); uint64_t get_dp_address_v8(void); #ifdef LKCD_COMMON /* * Until they differ across versions, these remain usable in the common * routines in lkcd_common.c */ #define LKCD_DUMP_MAGIC_NUMBER (0xa8190173618f23edULL) #define LKCD_DUMP_MAGIC_LIVE (0xa8190173618f23cdULL) #define LKCD_DUMP_V1 (0x1) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V2 (0x2) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V3 (0x3) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V5 (0x5) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V6 (0x6) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) #define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ #define LKCD_DUMP_COMPRESSED (0x2) /* DUMP_[DH_]COMPRESSED */ #define LKCD_DUMP_END (0x4) /* DUMP_[DH_]END */ #define LKCD_DUMP_COMPRESS_NONE (0x0) /* DUMP_COMPRESS_NONE */ #define LKCD_DUMP_COMPRESS_RLE (0x1) /* DUMP_COMPRESS_RLE */ #define LKCD_DUMP_COMPRESS_GZIP (0x2) /* DUMP_COMPRESS_GZIP */ #define LKCD_DUMP_MCLX_V0 (0x80000000) /* MCLX mod of 
LKCD */ #define LKCD_DUMP_MCLX_V1 (0x40000000) /* Extra page header data */ #define LKCD_OFFSET_TO_FIRST_PAGE (65536) #define MCLX_PAGE_HEADERS (4096) #define MCLX_V1_PAGE_HEADER_CACHE ((sizeof(uint64_t)) * MCLX_PAGE_HEADERS) /* * lkcd_load_dump_page_header() return values */ #define LKCD_DUMPFILE_OK (0) #define LKCD_DUMPFILE_EOF (1) #define LKCD_DUMPFILE_END (2) /* * Common handling of LKCD dump environment */ #define LKCD_CACHED_PAGES (16) #define LKCD_PAGE_HASH (32) #define LKCD_DUMP_HEADER_ONLY (1) /* arguments to lkcd_dump_environment */ #define LKCD_DUMP_PAGE_ONLY (2) #define LKCD_VALID (0x1) /* flags */ #define LKCD_REMOTE (0x2) #define LKCD_NOHASH (0x4) #define LKCD_MCLX (0x8) #define LKCD_BAD_DUMP (0x10) struct page_hash_entry { uint32_t pg_flags; uint64_t pg_addr; off_t pg_hdr_offset; struct page_hash_entry *next; }; struct page_desc { off_t offset; /* lseek offset in dump file */ }; struct physmem_zone { uint64_t start; struct page_desc *pages; }; struct fix_addrs { ulong task; ulong saddr; ulong sw; }; struct lkcd_environment { int fd; /* dumpfile file descriptor */ ulong flags; /* flags from above */ ulong debug; /* shadow of pc->debug */ FILE *fp; /* abstracted fp for fprintf */ void *dump_header; /* header stash, v1 or v2 */ void *dump_header_asm; /* architecture specific header for v2 */ void *dump_header_asm_smp; /* architecture specific header for v7 & v8 */ void *dump_page; /* current page header holder */ uint32_t version; /* version number of this dump */ uint32_t page_size; /* size of a Linux memory page */ int page_shift; /* byte address to page */ int bits; /* processor bitsize */ ulong panic_task; /* panic task address */ char *panic_string; /* pointer to stashed panic string */ uint32_t compression; /* compression type */ uint32_t (*get_dp_size)(void); /* returns current page's dp_size */ uint32_t (*get_dp_flags)(void); /* returns current page's dp_size */ uint64_t (*get_dp_address)(void); /* returns current page's dp_address*/ size_t 
page_header_size; /* size of version's page header */ unsigned long curpos; /* offset into current page */ uint64_t curpaddr; /* current page's physical address */ off_t curhdroffs; /* current page's header offset */ char *curbufptr; /* pointer to uncompressed page buffer */ uint64_t kvbase; /* physical-to-LKCD page address format*/ char *page_cache_buf; /* base of cached buffer pages */ char *compressed_page; /* copy of compressed page data */ int evict_index; /* next page to evict */ ulong evictions; /* total evictions done */ struct page_cache_hdr { /* header for each cached page */ uint32_t pg_flags; uint64_t pg_addr; char *pg_bufptr; ulong pg_hit_count; } page_cache_hdr[LKCD_CACHED_PAGES]; struct page_hash_entry *page_hash; ulong total_pages; ulong benchmark_pages; ulong benchmarks_done; off_t *mb_hdr_offsets; ulong total_reads; ulong cached_reads; ulong hashed_reads; ulong hashed; ulong compressed; ulong raw; /* lkcd_v7 additions */ char *dumpfile_index; /* array of offsets for each page */ int ifd; /* index file for dump (LKCD V7+) */ long memory_pages; /* Mamimum index of dump pages */ off_t page_offset_max; /* Offset of page with greatest offset seen so far */ long page_index_max; /* Index of page with greatest offset seen so far */ off_t *page_offsets; /* Pointer to huge array with seek offsets */ /* NB: There are no holes in the array */ struct physmem_zone *zones; /* Array of physical memory zones */ int num_zones; /* Number of zones initialized */ int max_zones; /* Size of the zones array */ long zoned_offsets; /* Number of stored page offsets */ uint64_t zone_mask; int zone_shift; int fix_addr_num; /* Number of active stacks to switch to saved values */ struct fix_addrs *fix_addr; /* Array of active stacks to switch to saved values */ }; #define ZONE_ALLOC 128 #define ZONE_SIZE (MEGABYTES(512)) #define MEGABYTE_ALIGNED(vaddr) (!((uint64_t)(vaddr) & MEGABYTE_MASK)) #define LKCD_PAGE_HASH_INDEX(paddr) \ (((paddr) >> lkcd->page_shift) % LKCD_PAGE_HASH) 
#define LKCD_PAGES_PER_MEGABYTE() (MEGABYTES(1) / lkcd->page_size) #define LKCD_PAGE_MEGABYTE(page) ((page) / LKCD_PAGES_PER_MEGABYTE()) #define LKCD_BENCHMARKS_DONE() (lkcd->benchmarks_done >= lkcd->benchmark_pages) #define LKCD_VALID_PAGE(flags) ((flags) & LKCD_VALID) extern struct lkcd_environment *lkcd; #define LKCD_DEBUG(x) (lkcd->debug >= (x)) #undef BITS #undef BITS32 #undef BITS64 #define BITS() (lkcd->bits) #define BITS32() (lkcd->bits == 32) #define BITS64() (lkcd->bits == 64) #endif /* LKCD_COMMON */ /* * gdb_interface.c */ void gdb_main_loop(int, char **); void display_gdb_banner(void); void get_gdb_version(void); void gdb_session_init(void); void gdb_interface(struct gnu_request *); int gdb_pass_through(char *, FILE *, ulong); int gdb_readmem_callback(ulong, void *, int, int); int gdb_line_number_callback(ulong, ulong, ulong); int gdb_print_callback(ulong); void gdb_error_hook(void); void restore_gdb_sanity(void); int is_gdb_command(int, ulong); char *gdb_command_string(int, char *, int); void dump_gnu_request(struct gnu_request *, int); int gdb_CRASHDEBUG(ulong); void dump_gdb_data(void); void update_gdb_hooks(void); void gdb_readnow_warning(void); int gdb_set_crash_scope(ulong, char *); extern int *gdb_output_format; extern unsigned int *gdb_print_max; extern int *gdb_prettyprint_structs; extern int *gdb_prettyprint_arrays; extern int *gdb_repeat_count_threshold; extern int *gdb_stop_print_at_null; extern unsigned int *gdb_output_radix; /* * gdb/top.c */ extern void execute_command (char *, int); #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern void (*command_loop_hook)(void); extern void (*error_hook)(void); #else extern void (*deprecated_command_loop_hook)(void); /* * gdb/exceptions.c */ extern void (*error_hook)(void); #endif /* * gdb/symtab.c */ extern void gdb_command_funnel(struct gnu_request *); /* * gdb/symfile.c */ #if defined(GDB_6_0) || defined(GDB_6_1) struct objfile; extern void (*target_new_objfile_hook)(struct 
objfile *); #endif /* * gdb/valprint.c */ extern unsigned output_radix; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern int output_format; extern int prettyprint_structs; extern int prettyprint_arrays; extern int repeat_count_threshold; extern unsigned int print_max; extern int stop_print_at_null; #endif #ifdef GDB_7_6 /* * gdb/cleanups.c */ struct cleanup; extern struct cleanup *all_cleanups(void); extern void do_cleanups(struct cleanup *); #else /* * gdb/utils.c */ extern void do_cleanups(void *); #endif /* * gdb/version.c */ extern char *version; /* * gdb/disasm.c */ #ifdef GDB_5_3 extern int gdb_disassemble_from_exec; #endif /* * readline/readline.c */ #ifdef GDB_5_3 extern char *readline(char *); #else extern char *readline(const char *); #endif extern int rl_editing_mode; /* * readline/history.c */ extern int history_offset; /* * external gdb routines */ extern int gdb_main_entry(int, char **); #ifdef GDB_5_3 extern unsigned long calc_crc32(unsigned long, unsigned char *, size_t); #else extern unsigned long gnu_debuglink_crc32 (unsigned long, unsigned char *, size_t); #endif extern int have_partial_symbols(void); extern int have_full_symbols(void); #if defined(X86) || defined(X86_64) || defined(IA64) #define XEN_HYPERVISOR_ARCH #endif #endif /* !GDB_COMMON */ crash-7.1.4/lkcd_fix_mem.h0000664000000000000000000002576312634305150014131 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ /* OBSOLETE */ #ifdef IA64 #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) 
*/ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* timeval depends on architecture, two long values */ struct { uint64_t tv_sec; uint64_t tv_usec; } dh_time; /* the time of the system crash */ /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ uint64_t dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; } __attribute__((packed)) dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } __attribute__((packed)) dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. 
*/ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
* */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ // struct pt_regs *dha_pt_regs; // version 4 changed this uint64_t dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; // v4 changed this struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; // v4 changed this uint64_t dha_stack[NR_CPUS]; // v4 changed this uint64_t dha_switch_stack[NR_CPUS]; // v4 changed this } __attribute__((packed)) dump_header_asm_t; #endif // IA64 crash-7.1.4/kvmdump.c0000664000000000000000000007533112634305150013162 0ustar rootroot/* * kvmdump.c * * Copyright (C) 2009, 2010, 2011 David Anderson * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include "kvmdump.h" static struct kvmdump_data kvmdump_data = { 0 }; struct kvmdump_data *kvm = &kvmdump_data; static int cache_page(physaddr_t); static int kvmdump_mapfile_exists(void); static off_t mapfile_offset(uint64_t); static void kvmdump_mapfile_create(char *); static void kvmdump_mapfile_append(void); static char *mapfile_in_use(void); static void write_mapfile_registers(void); static void write_mapfile_trailer(void); static void read_mapfile_trailer(void); static void read_mapfile_registers(void); #define RAM_OFFSET_COMPRESSED (~(off_t)255) #define QEMU_COMPRESSED ((WRITE_ERROR)-1) #define CACHE_UNUSED (1ULL) int is_kvmdump(char *filename) { int i; ulong *ptr; off_t eof; ulonglong csum; struct mapinfo_trailer trailer; char buf[CHKSUM_SIZE]; if (!is_qemu_vm_file(filename)) return FALSE; if (lseek(kvm->vmfd, 0, SEEK_SET) < 0) { error(INFO, "%s: read: %s\n", filename, strerror(errno)); return FALSE; } if (read(kvm->vmfd, buf, CHKSUM_SIZE) != CHKSUM_SIZE) { error(INFO, "%s: read: %s\n", filename, strerror(errno)); return FALSE; } ptr = (ulong *)&buf[0]; for (i = csum = 0; i < (CHKSUM_SIZE/sizeof(ulong)); i++, ptr++) csum += *ptr; eof = lseek(kvm->vmfd, 0, SEEK_END); if (lseek(kvm->vmfd, eof - sizeof(trailer), SEEK_SET) < 0) { error(INFO, "%s: lseek: %s\n", filename, strerror(errno)); return FALSE; } if (read(kvm->vmfd, &trailer, sizeof(trailer)) != sizeof(trailer)) { error(INFO, "%s: read: %s\n", filename, strerror(errno)); return FALSE; } if (trailer.magic == MAPFILE_MAGIC) { kvm->mapinfo.map_start_offset = trailer.map_start_offset; kvm->flags |= MAPFILE_APPENDED; } kvm->mapinfo.checksum = csum; return TRUE; } int kvmdump_init(char *filename, FILE *fptr) { int i, page_size; struct command_table_entry *cp; char *cachebuf; FILE *tmpfp; if (!machine_type("X86") && !machine_type("X86_64")) { error(FATAL, "invalid or unsupported host architecture for KVM: %s\n", MACHINE_TYPE); return FALSE; } kvm->ofp = fptr; kvm->debug = &pc->debug; 
page_size = memory_page_size(); #ifdef X86_64 kvm->kvbase = __START_KERNEL_map; #endif switch (kvm->flags & (TMPFILE|MAPFILE|MAPFILE_APPENDED)) { case MAPFILE_APPENDED: kvm->mapfd = kvm->vmfd; break; case MAPFILE|MAPFILE_APPENDED: case MAPFILE: break; default: if (kvmdump_mapfile_exists()) break; if ((tmpfp = tmpfile()) == NULL) error(FATAL, "cannot create tmpfile for KVM file offsets: %s\n", strerror(errno)); kvm->mapfd = fileno(tmpfp); kvm->flags |= TMPFILE; break; } if ((cachebuf = calloc(1, KVMDUMP_CACHED_PAGES * page_size)) == NULL) error(FATAL, "%s: cannot malloc KVM page_cache_buf\n"); for (i = 0; i < KVMDUMP_CACHED_PAGES; i++) { kvm->page_cache[i].paddr = CACHE_UNUSED; kvm->page_cache[i].bufptr = cachebuf + (i * page_size); } kvmdump_regs_store(KVMDUMP_REGS_START, NULL); if (qemu_init(filename)) { switch (kvm->flags & (TMPFILE|MAPFILE|MAPFILE_APPENDED)) { case TMPFILE: kvmdump_regs_store(KVMDUMP_REGS_END, NULL); write_mapfile_trailer(); break; case MAPFILE: case MAPFILE_APPENDED: case MAPFILE|MAPFILE_APPENDED: read_mapfile_trailer(); kvmdump_regs_store(KVMDUMP_REGS_END, NULL); break; } for (cp = pc->cmd_table; cp->name; cp++) { if (STREQ(cp->name, "map")) { cp->flags &= ~HIDDEN_COMMAND; break; } } kvm->flags |= KVMDUMP_LOCAL; return TRUE; } else return FALSE; } int read_kvmdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { switch (cache_page(PHYSPAGEBASE(paddr))) { case READ_ERROR: return READ_ERROR; case SEEK_ERROR: return SEEK_ERROR; case QEMU_COMPRESSED: memset(bufptr, kvm->un.compressed, cnt); break; default: memcpy(bufptr, kvm->un.curbufptr + PAGEOFFSET(paddr), cnt); break; } return cnt; } int write_kvmdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return SEEK_ERROR; } /* * kvmdump_free_memory(), and kvmdump_memory_used() * are debug only, and typically unnecessary to implement. 
 */

/* Debug stub: no dedicated free-memory accounting for KVM dumps. */
int
kvmdump_free_memory(void)
{
	return 0;
}

/* Debug stub: no dedicated used-memory accounting for KVM dumps. */
int
kvmdump_memory_used(void)
{
	return 0;
}

/*
 *  This function is dump-type independent, used here to
 *  to dump the kvmdump_data structure contents.
 */
int
kvmdump_memory_dump(FILE *ofp)
{
	int i, others;
	struct mapinfo_trailer trailer;
	off_t eof;

	/* Decode kvm->flags into a human-readable OR'd list. */
	fprintf(ofp, " flags: %lx (", kvm->flags);
	others = 0;
	if (kvm->flags & KVMDUMP_LOCAL)
		fprintf(ofp, "%sKVMDUMP_LOCAL", others++ ? "|" : "");
	if (kvm->flags & TMPFILE)
		fprintf(ofp, "%sTMPFILE", others++ ? "|" : "");
	if (kvm->flags & MAPFILE)
		fprintf(ofp, "%sMAPFILE", others++ ? "|" : "");
	if (kvm->flags & MAPFILE_FOUND)
		fprintf(ofp, "%sMAPFILE_FOUND", others++ ? "|" : "");
	if (kvm->flags & MAPFILE_APPENDED)
		fprintf(ofp, "%sMAPFILE_APPENDED", others++ ? "|" : "");
	if (kvm->flags & NO_PHYS_BASE)
		fprintf(ofp, "%sNO_PHYS_BASE", others++ ? "|" : "");
	if (kvm->flags & KVMHOST_32)
		fprintf(ofp, "%sKVMHOST_32", others++ ? "|" : "");
	if (kvm->flags & KVMHOST_64)
		fprintf(ofp, "%sKVMHOST_64", others++ ? "|" : "");
	if (kvm->flags & REGS_FROM_MAPFILE)
		fprintf(ofp, "%sREGS_FROM_MAPFILE", others++ ? "|" : "");
	if (kvm->flags & REGS_FROM_DUMPFILE)
		fprintf(ofp, "%sREGS_FROM_DUMPFILE", others++ ? "|" : "");
	if (kvm->flags & REGS_NOT_AVAIL)
		fprintf(ofp, "%sREGS_NOT_AVAIL", others++ ? "|" : "");
	fprintf(ofp, ")\n");

	/* File descriptors and stream handles in use. */
	fprintf(ofp, " mapfd: %d\n", kvm->mapfd);
	fprintf(ofp, " vmfd: %d\n", kvm->vmfd);
	fprintf(ofp, " vmp: %lx (fd: %d)\n", (ulong)kvm->vmp,
		fileno(kvm->vmp));
	fprintf(ofp, " ofp: %lx\n", (ulong)kvm->ofp);
	fprintf(ofp, " debug: %lx\n", (ulong)kvm->debug);
	if (machine_type("X86_64"))
		fprintf(ofp, " kvbase: %llx\n", (ulonglong)kvm->kvbase);
	else
		fprintf(ofp, " kvbase: (unused)\n");

	/* The in-memory copy of the mapinfo trailer. */
	fprintf(ofp, " mapinfo:\n");
	fprintf(ofp, " magic: %llx %s\n", (ulonglong)kvm->mapinfo.magic,
		kvm->mapinfo.magic == MAPFILE_MAGIC ?
		"(MAPFILE_MAGIC)" : "");
	fprintf(ofp, " phys_base: %llx %s\n",
		(ulonglong)kvm->mapinfo.phys_base,
		machine_type("X86") ? "(unused)" : "");
	fprintf(ofp, " cpu_version_id: %ld\n",
		(ulong)kvm->mapinfo.cpu_version_id);
	fprintf(ofp, " ram_version_id: %ld\n",
		(ulong)kvm->mapinfo.ram_version_id);
	fprintf(ofp, " map_start_offset: %llx\n",
		(ulonglong)kvm->mapinfo.map_start_offset);
	fprintf(ofp, " checksum: %llx\n",
		(ulonglong)kvm->mapinfo.checksum);

	/* Page cache statistics: hit/compress rates as percentages. */
	fprintf(ofp, " curbufptr: %lx\n", (ulong)kvm->un.curbufptr);
	fprintf(ofp, " evict_index: %d\n", kvm->evict_index);
	fprintf(ofp, " accesses: %ld\n", kvm->accesses);
	fprintf(ofp, " hit_count: %ld ", kvm->hit_count);
	if (kvm->accesses)
		fprintf(ofp, "(%ld%%)\n",
			kvm->hit_count * 100 / kvm->accesses);
	else
		fprintf(ofp, "\n");
	fprintf(ofp, " compresses: %ld ", kvm->compresses);
	if (kvm->accesses)
		fprintf(ofp, "(%ld%%)\n",
			kvm->compresses * 100 / kvm->accesses);
	else
		fprintf(ofp, "\n");

	for (i = 0; i < KVMDUMP_CACHED_PAGES; i++) {
		if (kvm->page_cache[i].paddr == CACHE_UNUSED)
			fprintf(ofp, " %spage_cache[%d]: CACHE_UNUSED\n",
				i < 10 ? " " : "", i);
		else
			fprintf(ofp,
			    " %spage_cache[%d]: bufptr: %lx addr: %llx\n",
				i < 10 ? " " : "", i,
				(ulong)kvm->page_cache[i].bufptr,
				(ulonglong)kvm->page_cache[i].paddr);
	}

	fprintf(ofp, " cpu_devices: %d\n", kvm->cpu_devices);
	/* The I/O hole sits just below the 4GB boundary. */
	fprintf(ofp, " iohole: %llx (%llx - %llx)\n",
		(ulonglong)kvm->iohole,
		0x100000000ULL - kvm->iohole, 0x100000000ULL);
	fprintf(ofp, " registers: %s\n",
		kvm->registers ? "" : "(not used)");
	for (i = 0; i < kvm->cpu_devices; i++) {
		fprintf(ofp, " CPU %d:\n", i);
		kvmdump_display_regs(i, ofp);
	}
	fprintf(ofp, "\n");
	dump_qemu_header(ofp);

	/* Re-read and display the on-disk trailer from the map file. */
	fprintf(ofp, "\n%s: mapinfo trailer:\n\n", mapfile_in_use());
	eof = lseek(kvm->mapfd, 0, SEEK_END);
	if (lseek(kvm->mapfd, eof - sizeof(trailer), SEEK_SET) < 0)
		error(FATAL, "%s: lseek: %s\n",
			mapfile_in_use(), strerror(errno));
	if (read(kvm->mapfd, &trailer, sizeof(trailer)) != sizeof(trailer))
		error(FATAL, "%s: read: %s\n",
			mapfile_in_use(), strerror(errno));

	fprintf(ofp, " magic: %llx %s\n", (ulonglong)trailer.magic,
		trailer.magic == MAPFILE_MAGIC ? "(MAPFILE_MAGIC)" : "");
	fprintf(ofp, " phys_base: %llx %s\n", (ulonglong)trailer.phys_base,
		machine_type("X86") ? "(unused)" : "");
	fprintf(ofp, " cpu_version_id: %ld\n",
		(ulong)trailer.cpu_version_id);
	fprintf(ofp, " ram_version_id: %ld\n",
		(ulong)trailer.ram_version_id);
	fprintf(ofp, " map_start_offset: %llx\n",
		(ulonglong)trailer.map_start_offset);
	fprintf(ofp, " checksum: %llx\n\n", (ulonglong)trailer.checksum);

	return TRUE;
}

/*
 *  Display the saved register set of one cpu in the dumpfile's
 *  native (x86 or x86_64) format.
 */
void
kvmdump_display_regs(int cpu, FILE *ofp)
{
	struct register_set *rp;

	if (cpu >= kvm->cpu_devices) {
		error(INFO, "registers not collected for cpu %d\n", cpu);
		return;
	}

	rp = &kvm->registers[cpu];

	if (machine_type("X86_64")) {
		fprintf(ofp,
		    " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n"
		    " RAX: %016llx RBX: %016llx RCX: %016llx\n"
		    " RDX: %016llx RSI: %016llx RDI: %016llx\n"
		    " RBP: %016llx R8: %016llx R9: %016llx\n"
		    " R10: %016llx R11: %016llx R12: %016llx\n"
		    " R13: %016llx R14: %016llx R15: %016llx\n"
		    " CS: %04x SS: %04x\n",
			(ulonglong)rp->ip,
			(ulonglong)rp->regs[R_ESP],
			(ulonglong)rp->flags,
			(ulonglong)rp->regs[R_EAX],
			(ulonglong)rp->regs[R_EBX],
			(ulonglong)rp->regs[R_ECX],
			(ulonglong)rp->regs[R_EDX],
			(ulonglong)rp->regs[R_ESI],
			(ulonglong)rp->regs[R_EDI],
			(ulonglong)rp->regs[R_EBP],
			(ulonglong)rp->regs[8],
			(ulonglong)rp->regs[9],
			(ulonglong)rp->regs[10],
			(ulonglong)rp->regs[11],
			(ulonglong)rp->regs[12],
			(ulonglong)rp->regs[13],
			(ulonglong)rp->regs[14],
			(ulonglong)rp->regs[15],
			rp->cs, rp->ss);
	}

	if (machine_type("X86")) {
		/*
		 *  NOTE(review): the "ES:" field below is printed from
		 *  rp->ds (rp->ds appears twice) -- looks like it should
		 *  be an es member of struct register_set; confirm against
		 *  the struct definition in kvmdump.h before changing.
		 */
		fprintf(ofp,
		    " EAX: %08llx EBX: %08llx ECX: %08llx EDX: %08llx\n"
		    " DS: %04x ESI: %08llx ES: %04x EDI: %08llx\n"
		    " SS: %04x ESP: %08llx EBP: %08llx GS: %04x\n"
		    " CS: %04x EIP: %08llx EFLAGS: %08llx\n",
			(ulonglong)rp->regs[R_EAX],
			(ulonglong)rp->regs[R_EBX],
			(ulonglong)rp->regs[R_ECX],
			(ulonglong)rp->regs[R_EDX],
			rp->ds,
			(ulonglong)rp->regs[R_ESI],
			rp->ds,
			(ulonglong)rp->regs[R_EDI],
			rp->ss,
			(ulonglong)rp->regs[R_ESP],
			(ulonglong)rp->regs[R_EBP],
			rp->gs,
			rp->cs,
			(ulonglong)rp->ip,
			(ulonglong)rp->flags);
	}
}

void
get_kvmdump_regs(struct bt_info *bt, ulong *ipp, ulong *spp) { ulong ip, sp; struct register_set *rp; ip = sp = 0; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, ipp, spp); return; } bt->flags |= BT_DUMPFILE_SEARCH; if (machine_type("X86_64")) machdep->get_stack_frame(bt, ipp, spp); else if (machine_type("X86")) get_netdump_regs_x86(bt, ipp, spp); if (bt->flags & BT_DUMPFILE_SEARCH) return; if ((kvm->registers == NULL) || (bt->tc->processor >= kvm->cpu_devices)) return; rp = &kvm->registers[bt->tc->processor]; ip = (ulong)rp->ip; sp = (ulong)rp->regs[R_ESP]; if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { *ipp = ip; *spp = sp; bt->flags |= BT_KERNEL_SPACE; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) bt->flags |= BT_USER_SPACE; } ulong get_kvmdump_panic_task(void) { int i; struct bt_info *bt; ulong panic_task, task, rip, rsp; char *sym; if (machine_type("X86") || !get_active_set()) return NO_TASK; bt = (struct bt_info *)GETBUF(sizeof(struct bt_info)); for (i = 0, panic_task = NO_TASK; i < NR_CPUS; i++) { if (!(task = tt->active_set[i]) || !(bt->tc = task_to_context(task))) continue; bt->task = task; bt->stackbase = GET_STACKBASE(task); bt->stacktop = GET_STACKTOP(task); if (!bt->stackbuf) bt->stackbuf = GETBUF(bt->stacktop - bt->stackbase); alter_stackbuf(bt); bt->flags |= BT_DUMPFILE_SEARCH; machdep->get_stack_frame(bt, &rip, &rsp); if (!(bt->flags & BT_DUMPFILE_SEARCH)) continue; sym = closest_symbol(rip); if (STREQ(sym, "panic") || STREQ(sym, "die") || STREQ(sym, "die_nmi") || STREQ(sym, "sysrq_handle_crash")) { if (CRASHDEBUG(1)) fprintf(fp, "get_kvmdump_panic_task: %lx\n", task); panic_task = task; break; } } if (bt->stackbuf) FREEBUF(bt->stackbuf); FREEBUF(bt); return panic_task; } int kvmdump_phys_base(unsigned long *phys_base) { if (KVMDUMP_VALID()) { if (CRASHDEBUG(1) && (kvm->mapinfo.cpu_version_id > 9)) error(NOTE, 
"KVM/QEMU CPU_SAVE_VERSION %d is greater than" " supported version 9\n\n", kvm->mapinfo.cpu_version_id); *phys_base = kvm->mapinfo.phys_base; return (kvm->flags & NO_PHYS_BASE ? FALSE : TRUE); } return FALSE; } static int cache_page(physaddr_t paddr) { int idx, err; struct kvm_page_cache_hdr *pgc; size_t page_size; off_t offset; kvm->accesses++; for (idx = 0; idx < KVMDUMP_CACHED_PAGES; idx++) { pgc = &kvm->page_cache[idx]; if (pgc->paddr == CACHE_UNUSED) continue; if (pgc->paddr == paddr) { kvm->hit_count++; kvm->un.curbufptr = pgc->bufptr; return idx; } } if ((err = load_mapfile_offset(paddr, &offset)) < 0) return err; if ((offset & RAM_OFFSET_COMPRESSED) == RAM_OFFSET_COMPRESSED) { kvm->un.compressed = (unsigned char)(offset & 255); kvm->compresses++; return QEMU_COMPRESSED; } idx = kvm->evict_index; pgc = &kvm->page_cache[idx]; page_size = memory_page_size(); if (lseek(kvm->vmfd, offset, SEEK_SET) < 0) { pgc->paddr = CACHE_UNUSED; return SEEK_ERROR; } if (read(kvm->vmfd, pgc->bufptr, page_size) != page_size) { pgc->paddr = CACHE_UNUSED; return READ_ERROR; } kvm->evict_index = (idx+1) % KVMDUMP_CACHED_PAGES; pgc->paddr = paddr; kvm->un.curbufptr = pgc->bufptr; return idx; } static off_t mapfile_offset(uint64_t physaddr) { off_t offset = 0; switch (kvm->flags & (TMPFILE|MAPFILE|MAPFILE_APPENDED)) { case TMPFILE: case TMPFILE|MAPFILE_APPENDED: case MAPFILE: case MAPFILE|MAPFILE_APPENDED: offset = (off_t)(((((uint64_t)physaddr/(uint64_t)4096)) * sizeof(off_t))); break; case MAPFILE_APPENDED: offset = (off_t)(((((uint64_t)physaddr/(uint64_t)4096)) * sizeof(off_t)) + kvm->mapinfo.map_start_offset); break; } return offset; } int store_mapfile_offset(uint64_t physaddr, off_t *entry_ptr) { if (lseek(kvm->mapfd, mapfile_offset(physaddr), SEEK_SET) < 0) { error(INFO, "store_mapfile_offset: " "lseek error: physaddr: %llx %s offset: %llx\n", (unsigned long long)physaddr, mapfile_in_use(), (unsigned long long)mapfile_offset(physaddr)); return SEEK_ERROR; } if 
(write(kvm->mapfd, entry_ptr, sizeof(off_t)) != sizeof(off_t)) { error(INFO, "store_mapfile_offset: " "write error: physaddr: %llx %s offset: %llx\n", (unsigned long long)physaddr, mapfile_in_use(), (unsigned long long)mapfile_offset(physaddr)); return WRITE_ERROR; } return 0; } int load_mapfile_offset(uint64_t physaddr, off_t *entry_ptr) { uint64_t kvm_addr = physaddr; switch (kvm->iohole) { case 0x20000000ULL: if (physaddr >= 0xe0000000ULL) { if (physaddr < 0x100000000ULL) return SEEK_ERROR; /* In 512MB I/O hole */ kvm_addr -= kvm->iohole; } break; case 0x40000000ULL: if (physaddr >= 0xc0000000ULL) { if (physaddr < 0x100000000ULL) return SEEK_ERROR; /* In 1GB I/O hole */ kvm_addr -= kvm->iohole; } break; } if (lseek(kvm->mapfd, mapfile_offset(kvm_addr), SEEK_SET) < 0) { if (CRASHDEBUG(1)) error(INFO, "load_mapfile_offset: " "lseek error: physical: %llx %s offset: %llx\n", (unsigned long long)physaddr, mapfile_in_use(), (unsigned long long)mapfile_offset(kvm_addr)); return SEEK_ERROR; } if (read(kvm->mapfd, entry_ptr, sizeof(off_t)) != sizeof(off_t)) { if (CRASHDEBUG(1)) error(INFO, "load_mapfile_offset: " "read error: physical: %llx %s offset: %llx\n", (unsigned long long)physaddr, mapfile_in_use(), (unsigned long long)mapfile_offset(kvm_addr)); return READ_ERROR; } return 0; } static void kvmdump_mapfile_create(char *filename) { int fdmem, n; off_t offset; char buf[4096]; if (kvm->flags & MAPFILE) { error(INFO, "%s: mapfile in use\n", pc->kvmdump_mapfile); return; } if (file_exists(filename, NULL)) { error(INFO, "%s: file already exists!\n", filename); return; } if ((fdmem = open(filename, O_CREAT|O_RDWR, 0644)) < 0) { error(INFO, "%s: open: %s\n", filename, strerror(errno)); return; } offset = kvm->mapinfo.map_start_offset; if (lseek(kvm->mapfd, offset, SEEK_SET) < 0) { error(INFO, "%s: leek: %s\n", mapfile_in_use(), strerror(errno)); return; } while ((n = read(kvm->mapfd, buf, 4096)) > 0) { if (write(fdmem, buf, n) != n) { error(INFO, "%s: write: %s\n", 
			    filename, strerror(errno));
			break;
		}
	}

	close(fdmem);

	fprintf(fp, "MAP FILE CREATED: %s\n", filename);
}

/*
 *  "map -a": append the map data to the dumpfile itself, 8-byte aligned,
 *  so future sessions need no separate map file.  On any failure after
 *  the first write, the dumpfile is truncated back to its original size
 *  (goto-based cleanup).
 */
static void
kvmdump_mapfile_append(void)
{
	int n, fdcore;
	ulong round_bytes;
	struct stat statbuf;
	uint64_t map_start_offset;
	off_t eof, orig_dumpfile_size;
	char buf[4096];

	if (kvm->flags & MAPFILE_APPENDED)
		error(FATAL, "mapfile already appended to %s\n",
			pc->dumpfile);

	if (access(pc->dumpfile, W_OK) != 0)
		error(FATAL,
		    "%s: cannot append map information to this file\n",
			pc->dumpfile);

	if (stat(pc->dumpfile, &statbuf) < 0)
		error(FATAL, "%s: stat: %s\n",
			pc->dumpfile, strerror(errno));

	/* padding needed to align the appended map on a uint64_t boundary */
	round_bytes = (sizeof(uint64_t) -
		(statbuf.st_size % sizeof(uint64_t))) % sizeof(uint64_t);

	if ((fdcore = open(pc->dumpfile, O_WRONLY)) < 0)
		error(FATAL, "%s: open: %s\n",
			pc->dumpfile, strerror(errno));

	if ((orig_dumpfile_size = lseek(fdcore, 0, SEEK_END)) < 0) {
		error(INFO, "%s: lseek: %s\n",
			pc->dumpfile, strerror(errno));
		goto bailout1;
	}

	if (round_bytes) {
		BZERO(buf, round_bytes);

		if (write(fdcore, buf, round_bytes) != round_bytes) {
			error(INFO, "%s: write: %s\n",
				pc->dumpfile, strerror(errno));
			goto bailout2;
		}
	}

	map_start_offset = orig_dumpfile_size + round_bytes;

	if (lseek(kvm->mapfd, 0, SEEK_SET) != 0) {
		error(INFO, "%s: lseek: %s\n",
			mapfile_in_use(), strerror(errno));
		goto bailout2;
	}

	/* bulk-copy the map data (including its trailer) onto the end */
	while ((n = read(kvm->mapfd, buf, 4096)) > 0) {
		if (write(fdcore, buf, n) != n) {
			error(INFO, "%s: write: %s\n",
				pc->dumpfile, strerror(errno));
			goto bailout2;
		}
	}

	/*
	 *  Overwrite the map_start_offset value in the trailer to reflect
	 *  its location in the appended-to dumpfile.
	 */
	eof = lseek(fdcore, 0, SEEK_END);
	if (lseek(fdcore, eof - sizeof(struct mapinfo_trailer),
	    SEEK_SET) < 0) {
		error(INFO, "%s: write: %s\n",
			pc->dumpfile, strerror(errno));
		goto bailout2;
	}

	if (write(fdcore, &map_start_offset, sizeof(uint64_t))
	    != sizeof(uint64_t)) {
		error(INFO, "%s: write: %s\n",
			pc->dumpfile, strerror(errno));
		goto bailout2;
	}

	close(fdcore);
	kvm->flags |= MAPFILE_APPENDED;

	fprintf(fp, "MAP FILE APPENDED TO: %s\n", pc->dumpfile);

	return;

bailout2:
	/* roll the dumpfile back to its pre-append size */
	if (ftruncate(fdcore, (off_t)orig_dumpfile_size) < 0)
		error(INFO, "%s: ftruncate: %s\n",
			pc->dumpfile, strerror(errno));
bailout1:
	close(fdcore);
	error(INFO, "failed to append map to %s\n", pc->dumpfile);
}

/*
 *  Determine whether a file is a valid standalone map file for the
 *  current dumpfile: its trailer must carry MAPFILE_MAGIC and its
 *  checksum must match the one computed from the dumpfile header.
 *  On success the fd is retained in kvm->mapfd.
 */
int
is_kvmdump_mapfile(char *filename)
{
	int fd;
	struct mapinfo_trailer trailer;
	off_t eof;

	if ((fd = open(filename, O_RDONLY)) < 0) {
		error(INFO, "%s: open: %s\n", filename, strerror(errno));
		return FALSE;
	}

	if ((eof = lseek(fd, 0, SEEK_END)) == -1)
		goto bailout;

	if (lseek(fd, eof - sizeof(trailer), SEEK_SET) < 0) {
		error(INFO, "%s: lseek: %s\n", filename,
			strerror(errno));
		goto bailout;
	}

	if (read(fd, &trailer, sizeof(trailer)) != sizeof(trailer)) {
		error(INFO, "%s: read: %s\n", filename,
			strerror(errno));
		goto bailout;
	}

	if (trailer.magic == MAPFILE_MAGIC) {
		if (pc->dumpfile &&
		    (trailer.checksum != kvm->mapinfo.checksum)) {
			/* only fatal when the map was explicitly supplied */
			error(kvm->flags & MAPFILE_FOUND ?
INFO : FATAL, "checksum mismatch between %s and %s\n\n", pc->dumpfile, filename); goto bailout; } kvm->mapfd = fd; kvm->flags |= MAPFILE; return TRUE; } bailout: close(fd); return FALSE; } static int kvmdump_mapfile_exists(void) { char *filename; struct stat stat; if (!(filename = malloc(strlen(pc->dumpfile) + strlen(".map") + 10))) return FALSE; sprintf(filename, "%s.map", pc->dumpfile); if (!file_exists(filename, &stat) || !S_ISREG(stat.st_mode)) return FALSE; if (is_kvmdump_mapfile(filename)) { pc->kvmdump_mapfile = filename; kvm->flags |= MAPFILE_FOUND; return TRUE; } free(filename); return FALSE; } void cmd_map(void) { int c; int append, file, specified; char *mapfile; append = file = specified = 0; mapfile = NULL; while ((c = getopt(argcnt, args, "af")) != EOF) { switch(c) { case 'a': append++; break; case 'f': file++; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { if (!mapfile) { mapfile = args[optind]; specified++; } else cmd_usage(pc->curcmd, SYNOPSIS); optind++; } if (file && !specified) { mapfile = GETBUF(strlen(pc->dumpfile)+10); sprintf(mapfile, "%s.map", pc->dumpfile); } if (append) kvmdump_mapfile_append(); if (file) { kvmdump_mapfile_create(mapfile); if (!specified) FREEBUF(mapfile); } if (!file && !append) fprintf(fp, "MAP FILE IN USE: %s\n", mapfile_in_use()); } static char * mapfile_in_use(void) { char *name; switch (kvm->flags & (TMPFILE|MAPFILE|MAPFILE_APPENDED)) { default: case TMPFILE: case TMPFILE|MAPFILE_APPENDED: name = "(tmpfile)"; break; case MAPFILE: case MAPFILE|MAPFILE_APPENDED: name = pc->kvmdump_mapfile; break; case MAPFILE_APPENDED: name = pc->dumpfile; break; } return name; } static void write_mapfile_trailer(void) { if (kvm->cpu_devices) write_mapfile_registers(); kvm->mapinfo.magic = MAPFILE_MAGIC; if (lseek(kvm->mapfd, 0, SEEK_END) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); if (write(kvm->mapfd, &kvm->mapinfo, sizeof(struct 
mapinfo_trailer)) != sizeof(struct mapinfo_trailer)) error(FATAL, "%s: write: %s\n", mapfile_in_use(), strerror(errno)); } static void write_mapfile_registers(void) { size_t regs_size; uint64_t magic; if (lseek(kvm->mapfd, 0, SEEK_END) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); regs_size = sizeof(struct register_set) * kvm->cpu_devices; if (write(kvm->mapfd, &kvm->registers[0], regs_size) != regs_size) error(FATAL, "%s: write: %s\n", mapfile_in_use(), strerror(errno)); if (write(kvm->mapfd, &kvm->cpu_devices, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "%s: write: %s\n", mapfile_in_use(), strerror(errno)); magic = REGS_MAGIC; if (write(kvm->mapfd, &magic, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "%s: write: %s\n", mapfile_in_use(), strerror(errno)); } static void read_mapfile_trailer(void) { off_t eof; struct mapinfo_trailer trailer; if ((eof = lseek(kvm->mapfd, 0, SEEK_END)) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); if (lseek(kvm->mapfd, eof - sizeof(trailer), SEEK_SET) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); if (read(kvm->mapfd, &trailer, sizeof(trailer)) != sizeof(trailer)) error(FATAL, "%s: read: %s\n", mapfile_in_use(), strerror(errno)); if (kvm->mapinfo.checksum != trailer.checksum) error(FATAL, "checksum mismatch between %s and %s\n", pc->dumpfile, mapfile_in_use()); kvm->mapinfo = trailer; read_mapfile_registers(); } static void read_mapfile_registers(void) { size_t regs_size; uint64_t ncpus, magic; off_t offset; if ((offset = lseek(kvm->mapfd, 0, SEEK_END)) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); offset -= sizeof(struct mapinfo_trailer) + sizeof(magic) + sizeof(ncpus); if (lseek(kvm->mapfd, offset, SEEK_SET) < 0) error(FATAL, "%s: lseek: %s\n", mapfile_in_use(), strerror(errno)); if (read(kvm->mapfd, &ncpus, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "%s: read: %s\n", mapfile_in_use(), strerror(errno)); 
	if (read(kvm->mapfd, &magic, sizeof(uint64_t)) != sizeof(uint64_t))
		error(FATAL, "%s: read: %s\n",
			mapfile_in_use(), strerror(errno));

	/* no register section present (or implausible cpu count) */
	if ((magic != REGS_MAGIC) || (ncpus >= NR_CPUS)) {
		kvm->flags |= REGS_NOT_AVAIL;
		return;
	}

	regs_size = sizeof(struct register_set) * ncpus;
	offset -= regs_size;
	if (lseek(kvm->mapfd, offset, SEEK_SET) < 0)
		error(FATAL, "%s: lseek: %s\n",
			mapfile_in_use(), strerror(errno));

	if (read(kvm->mapfd, &kvm->registers[0], regs_size) != regs_size)
		error(FATAL, "%s: read: %s\n",
			mapfile_in_use(), strerror(errno));

	kvm->cpu_devices = ncpus;
	kvm->flags |= REGS_FROM_MAPFILE;
}

/*
 *  Record the bitness of the KVM host from the --kvmhost command line
 *  option; only meaningful on X86 guests.
 */
void
set_kvmhost_type(char *host)
{
	if (!machine_type("X86")) {
		error(INFO,
		    "--kvmhost is only applicable to the X86 architecture\n");
		return;
	}

	if (STREQ(host, "32")) {
		kvm->flags &= ~KVMHOST_64;
		kvm->flags |= KVMHOST_32;
	} else if (STREQ(host, "64")) {
		kvm->flags &= ~KVMHOST_32;
		kvm->flags |= KVMHOST_64;
	} else
		error(INFO, "invalid --kvmhost argument: %s\n", host);
}

/*
 *  set_kvm_iohole() is called from main() with a command line argument,
 *  or from the x86/x86_64_init functions for assistance in determining
 *  the I/O hole size.
 */
void
set_kvm_iohole(char *optarg)
{
#define DEFAULT_IOHOLE() \
	((kvm->mapinfo.cpu_version_id <= 9) ? 0x40000000 : 0x20000000)
#define E820_RAM 1

	if (optarg) {
		ulong flags;
		ulonglong iohole;
		char *arg;

		flags = LONG_LONG;
		if (IS_A_NUMBER(&LASTCHAR(optarg)))
			flags |= HEX_BIAS;
		arg = strdup(optarg);
		if (!calculate(arg, NULL, &iohole, flags))
			error(FATAL,
			    "invalid --kvm_iohole argument: %s\n", optarg);
		free(arg);

		/*
		 *  Only 512MB or 1GB have been used to date.
		 */
		if ((iohole != 0x20000000ULL) && (iohole != 0x40000000ULL))
			error(WARNING,
			    "questionable --kvmio argument: %s\n", optarg);

		kvm->iohole = iohole;
	} else {
		int nr_map, i;
		char *buf, *e820entry;
		ulonglong addr, size, ending_addr;
		uint type;

		if (kvm->iohole)
			return;  /* set by command line option below */

		kvm->iohole = DEFAULT_IOHOLE();

		if (!symbol_exists("e820"))
			return;

		/* scan the guest's e820 map for RAM ending inside the
		   0xc0000000-0xe0000000 window -> 512MB I/O hole */
		buf = (char *)GETBUF(SIZE(e820map));
		if (!readmem(symbol_value("e820"), KVADDR, &buf[0],
		    SIZE(e820map), "e820map", RETURN_ON_ERROR|QUIET)) {
			FREEBUF(buf);
			return;
		}

		nr_map = INT(buf + OFFSET(e820map_nr_map));

		for (i = 0; i < nr_map; i++) {
			e820entry = buf + sizeof(int) + (SIZE(e820entry) * i);
			addr = ULONGLONG(e820entry + OFFSET(e820entry_addr));
			size = ULONGLONG(e820entry + OFFSET(e820entry_size));
			type = UINT(e820entry + OFFSET(e820entry_type));
			if (type != E820_RAM)
				continue;
			if (addr >= 0x100000000ULL)
				break;
			ending_addr = addr + size;
			if ((ending_addr > 0xc0000000ULL) &&
			    (ending_addr <= 0xe0000000ULL)) {
				kvm->iohole = 0x20000000ULL;
				break;
			}
		}

		FREEBUF(buf);
	}
}

#include "qemu-load.h"

/*
 *  Callback used while loading a kvmdump: stash the x86 register state
 *  for each cpu device.  KVMDUMP_REGS_START/END bracket the sequence;
 *  returns FALSE when the register contents look inconsistent with the
 *  presumed host/guest bitness.
 */
int
kvmdump_regs_store(uint32_t cpu, struct qemu_device_x86 *dx86)
{
	struct register_set *rp;
	int retval;

	retval = TRUE;

	switch (cpu)
	{
	case KVMDUMP_REGS_START:
		if ((kvm->registers = calloc(NR_CPUS,
		    sizeof(struct register_set))) == NULL)
			error(FATAL, "kvmdump_regs_store: "
				"cannot malloc KVM register_set array\n");
		kvm->cpu_devices = 0;
		break;

	case KVMDUMP_REGS_END:
		/* shrink (or drop) the NR_CPUS-sized array */
		if (kvm->cpu_devices == 0) {
			free(kvm->registers);
			kvm->registers = NULL;
		} else if ((kvm->registers = realloc(kvm->registers,
		    sizeof(struct register_set) * kvm->cpu_devices)) == NULL)
			error(FATAL, "kvmdump_regs_store: "
				"cannot realloc KVM registers array\n");
		break;

	default:
		if (cpu >= NR_CPUS) {
			if (machine_type("X86") &&
			    !(kvm->flags & (KVMHOST_32|KVMHOST_64)))
				return FALSE;
			break;
		}
		rp = &kvm->registers[cpu];
		rp->ip = dx86->eip;
		rp->flags = dx86->eflags;
		rp->cs = dx86->cs.selector;
		rp->ss = dx86->ss.selector;
		rp->ds = dx86->ds.selector;
		rp->es =
dx86->es.selector; rp->fs = dx86->fs.selector; rp->gs = dx86->gs.selector; BCOPY(dx86->regs, rp->regs, 16*sizeof(uint64_t)); kvm->cpu_devices = cpu+1; kvm->flags |= REGS_FROM_DUMPFILE; if (machine_type("X86_64") || (kvm->flags & (KVMHOST_32|KVMHOST_64))) break; if ((rp->regs[R_EAX] & UPPER_32_BITS) || (rp->regs[R_EBX] & UPPER_32_BITS) || (rp->regs[R_ECX] & UPPER_32_BITS) || (rp->regs[R_EDX] & UPPER_32_BITS) || (rp->regs[R_ESI] & UPPER_32_BITS) || (rp->regs[R_EDI] & UPPER_32_BITS) || (rp->regs[R_ESP] & UPPER_32_BITS) || (rp->regs[R_EBP] & UPPER_32_BITS) || (rp->ip & UPPER_32_BITS)) retval = FALSE; break; } return retval; } int get_kvm_register_set(int cpu, struct kvm_register_set *krs) { struct register_set *rs = &kvm->registers[cpu]; if (!krs) return FALSE; if (machine_type("X86") || machine_type("X86_64")) { krs->x86.cs = rs->cs; krs->x86.ss = rs->ss; krs->x86.ds = rs->ds; krs->x86.es = rs->es; krs->x86.fs = rs->fs; krs->x86.gs = rs->gs; krs->x86.ip = rs->ip; krs->x86.flags = rs->flags; krs->x86.regs[0] = rs->regs[0]; krs->x86.regs[1] = rs->regs[1]; krs->x86.regs[2] = rs->regs[2]; krs->x86.regs[3] = rs->regs[3]; krs->x86.regs[4] = rs->regs[4]; krs->x86.regs[5] = rs->regs[5]; krs->x86.regs[6] = rs->regs[6]; krs->x86.regs[7] = rs->regs[7]; krs->x86.regs[8] = rs->regs[8]; krs->x86.regs[9] = rs->regs[9]; krs->x86.regs[10] = rs->regs[10]; krs->x86.regs[11] = rs->regs[11]; krs->x86.regs[12] = rs->regs[12]; krs->x86.regs[13] = rs->regs[13]; krs->x86.regs[14] = rs->regs[14]; krs->x86.regs[15] = rs->regs[15]; return TRUE; } return FALSE; } crash-7.1.4/alpha.c0000775000000000000000000022547412634305150012574 0ustar rootroot/* alpha.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010-2013 David Anderson * Copyright (C) 2002-2006, 2010-2013 Red Hat, Inc. All rights reserved. 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifdef ALPHA
#include "defs.h"

/* forward declarations for the Alpha machine-dependent backend */
static void alpha_back_trace(struct gnu_request *, struct bt_info *);
static int alpha_trace_status(struct gnu_request *, struct bt_info *);
static void alpha_exception_frame(ulong, ulong,
	struct gnu_request *, struct bt_info *);
static void alpha_frame_offset(struct gnu_request *, ulong);
static int alpha_backtrace_resync(struct gnu_request *, ulong,
	struct bt_info *);
static void alpha_print_stack_entry(struct gnu_request *,
	ulong, char *, ulong, struct bt_info *);
static int alpha_resync_speculate(struct gnu_request *, ulong,struct bt_info *);
static int alpha_dis_filter(ulong, char *, unsigned int);
static void dis_address_translation(ulong, char *, unsigned int);
static void alpha_cmd_mach(void);
static int alpha_get_smp_cpus(void);
static void alpha_display_machine_stats(void);
static void alpha_dump_line_number(char *, ulong);
static void display_hwrpb(unsigned int);
static void alpha_post_init(void);
static struct line_number_hook alpha_line_number_hooks[];

/* return codes from alpha_trace_status(), steering the unwinder */
#define ALPHA_CONTINUE_TRACE (1)
#define ALPHA_END_OF_TRACE (2)
#define ALPHA_EXCEPTION_FRAME (3)
#define ALPHA_SYSCALL_FRAME (4)
#define ALPHA_MM_FAULT (5)
#define ALPHA_INTERRUPT_PENDING (6)
#define ALPHA_RESCHEDULE (7)
#define ALPHA_DOWN_FAILED (8)
#define ALPHA_RET_FROM_SMP_FORK (9)
#define ALPHA_SIGNAL_RETURN (10)
#define ALPHA_STRACE (11)

static int alpha_eframe_search(struct bt_info *);
static int alpha_uvtop(struct task_context *, ulong, physaddr_t *, int);
static int alpha_kvtop(struct task_context *, ulong, physaddr_t *, int);
static void alpha_back_trace_cmd(struct bt_info *);
static ulong alpha_get_task_pgd(ulong task);
static ulong alpha_processor_speed(void);
static void alpha_dump_irq(int);
static void alpha_get_stack_frame(struct bt_info *, ulong *, ulong *);
static void get_alpha_frame(struct bt_info *, ulong *, ulong *);
static int verify_user_eframe(struct bt_info *, ulong, ulong);
static int alpha_translate_pte(ulong, void *, ulonglong);
static uint64_t alpha_memory_size(void);
static ulong alpha_vmalloc_start(void);
static int alpha_is_task_addr(ulong);
static int alpha_verify_symbol(const char *, ulong, char);

/* per-cpu halt state extracted from the HWRPB */
struct percpu_data {
	ulong halt_PC;
	ulong halt_ra;
	ulong halt_pv;
};

#define GET_HALT_PC 0x1
#define GET_HALT_RA 0x2
#define GET_HALT_PV 0x3

static ulong get_percpu_data(int, ulong, struct percpu_data *);

/*
 *  Do all necessary machine-specific setup here.  This is called three times,
 *  before symbol table initialization, and before and after GDB has been
 *  initialized.
 */
void
alpha_init(int when)
{
	int tmp;

	switch (when)
	{
	case PRE_SYMTAB:
		/* basic page geometry and page-table scratch buffers */
		machdep->verify_symbol = alpha_verify_symbol;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~(machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 2;
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = generic_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		break;

	case PRE_GDB:
		/* derive the KSEG base from _stext, then wire up the
		   machdep function vector */
		switch (symbol_value("_stext") & KSEG_BASE)
		{
		case KSEG_BASE:
			machdep->kvbase = KSEG_BASE;
			break;
		case KSEG_BASE_48_BIT:
			machdep->kvbase = KSEG_BASE_48_BIT;
			break;
		default:
			error(FATAL,
			    "cannot determine KSEG base from _stext: %lx\n",
				symbol_value("_stext"));
		}
		machdep->identity_map_base = machdep->kvbase;
		machdep->is_kvaddr = generic_is_kvaddr;
		machdep->is_uvaddr = generic_is_uvaddr;
		machdep->eframe_search = alpha_eframe_search;
		machdep->back_trace = alpha_back_trace_cmd;
		machdep->processor_speed = alpha_processor_speed;
		machdep->uvtop = alpha_uvtop;
		machdep->kvtop = alpha_kvtop;
		machdep->get_task_pgd = alpha_get_task_pgd;
		if (symbol_exists("irq_desc"))
			machdep->dump_irq = generic_dump_irq;
		else
			machdep->dump_irq = alpha_dump_irq;
		machdep->get_stack_frame = alpha_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = alpha_translate_pte;
		machdep->memory_size = alpha_memory_size;
		machdep->vmalloc_start = alpha_vmalloc_start;
		machdep->is_task_addr = alpha_is_task_addr;
		if (symbol_exists("console_crash")) {
			get_symbol_data("console_crash", sizeof(int), &tmp);
			if (tmp)
				machdep->flags |= HWRESET;
		}
		machdep->dis_filter = alpha_dis_filter;
		machdep->cmd_mach = alpha_cmd_mach;
		machdep->get_smp_cpus = alpha_get_smp_cpus;
		machdep->line_number_hooks = alpha_line_number_hooks;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = NULL;
		break;

	case POST_GDB:
		/* structure member offsets now that debuginfo is loaded */
		MEMBER_OFFSET_INIT(thread_struct_ptbr,
			"thread_struct", "ptbr");
		MEMBER_OFFSET_INIT(hwrpb_struct_cycle_freq,
			"hwrpb_struct", "cycle_freq");
		MEMBER_OFFSET_INIT(hwrpb_struct_processor_offset,
			"hwrpb_struct", "processor_offset");
		MEMBER_OFFSET_INIT(hwrpb_struct_processor_size,
			"hwrpb_struct", "processor_size");
		MEMBER_OFFSET_INIT(percpu_struct_halt_PC,
			"percpu_struct", "halt_PC");
		MEMBER_OFFSET_INIT(percpu_struct_halt_ra,
			"percpu_struct", "halt_ra");
		MEMBER_OFFSET_INIT(percpu_struct_halt_pv,
			"percpu_struct", "halt_pv");
		MEMBER_OFFSET_INIT(switch_stack_r26,
			"switch_stack", "r26");
		if (symbol_exists("irq_action"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_action,
				"irq_action", NULL, 0);
		else if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				"irq_desc", NULL, 0);
		else
			machdep->nr_irqs = 0;
		if (!machdep->hz)
			machdep->hz = HZ;
		break;

	case POST_INIT:
		alpha_post_init();
		break;
	}
}

/*
 *  Unroll a kernel stack.
 */
static void
alpha_back_trace_cmd(struct bt_info *bt)
{
	char buf[BUFSIZE];
	struct gnu_request *req;

	bt->flags |= BT_EXCEPTION_FRAME;

	if (CRASHDEBUG(1) || bt->debug)
		fprintf(fp, " => PC: %lx (%s) FP: %lx \n",
			bt->instptr, value_to_symstr(bt->instptr, buf, 0),
			bt->stkptr );

	/* build a gdb-style request; either hand it to gdb directly or
	   run the local unwinder */
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_STACK_TRACE;
	req->flags = GNU_RETURN_ON_ERROR;
	req->buf = GETBUF(BUFSIZE);
	req->debug = bt->debug;
	req->task = bt->task;

	req->pc = bt->instptr;
	req->sp = bt->stkptr;

	if (bt->flags & BT_USE_GDB) {
		strcpy(req->buf, "backtrace");
		gdb_interface(req);
	}
	else
		alpha_back_trace(req, bt);

	FREEBUF(req->buf);
	FREEBUF(req);
}

/*
 *  Unroll the kernel stack.
 */

/*
 *  NOTE: this macro is expanded inside alpha_back_trace()'s main loop;
 *  the "continue" and "goto show_remaining_text" target that loop and
 *  its cleanup label, so it must only be used there.  It attempts a
 *  speculative resync of the trace via a text-symbol rescan.
 */
#define ALPHA_BACKTRACE_SPECULATE(X)					\
{									\
	speculate_location = X;						\
									\
	if (bt->flags & BT_SPECULATE) 					\
		return;	 						\
									\
	BZERO(btloc, sizeof(struct bt_info));				\
	btloc->task = req->task;					\
	btloc->tc = bt->tc;						\
	btloc->stackbase = bt->stackbase;				\
	btloc->stacktop = bt->stacktop;					\
	btloc->flags = BT_TEXT_SYMBOLS_NOPRINT;				\
	hook.eip = 0;							\
	hook.esp = req->lastsp ? req->lastsp + sizeof(long) : 0;	\
	btloc->hp = &hook;						\
									\
	back_trace(btloc);						\
									\
	if (hook.esp && hook.eip) {					\
		req->hookp = &hook;					\
		if (alpha_resync_speculate(req, bt->flags, bt)) {	\
			req->pc = hook.eip;				\
			req->sp = hook.esp;				\
			continue;					\
		}							\
		goto show_remaining_text;				\
	}								\
	goto show_remaining_text;					\
}

static void
alpha_back_trace(struct gnu_request *req, struct bt_info *bt)
{
	char buf[BUFSIZE];
	int frame;
	int done;
	int status;
	struct stack_hook hook;
	int eframe_same_pc_ra_function;
	int speculate_location;
	struct bt_info bt_info, *btloc;

	frame = 0;
	req->curframe = 0;
	btloc = &bt_info;

	if (!IS_KVADDR(req->pc)) {
		if (BT_REFERENCE_CHECK(bt))
			return;
		if ((machdep->flags & HWRESET) && is_task_active(req->task)) {
			fprintf(fp, "(hardware reset while in user space)\n");
			return;
		}
		fprintf(fp, "invalid pc: %lx\n", req->pc);
		alpha_exception_frame(USER_EFRAME_ADDR(req->task),
			BT_USER_EFRAME, req, bt);
		return;
	}

	/* frame < 100 caps runaway traces */
	for (done = FALSE; !done && (frame < 100); frame++) {
		speculate_location = 0;

		if ((req->name = closest_symbol(req->pc)) == NULL) {
			req->ra = req->pc = 0;
			if (alpha_backtrace_resync(req,
			    bt->flags | BT_FROM_CALLFRAME, bt))
				continue;
			if (BT_REFERENCE_FOUND(bt))
				return;
			ALPHA_BACKTRACE_SPECULATE(1);
		}

		if (!INSTACK(req->sp, bt))
			break;

		if (!is_kernel_text(req->pc))
			ALPHA_BACKTRACE_SPECULATE(2);

		alpha_print_stack_entry(req, req->pc, req->name,
			bt->flags | BT_SAVE_LASTSP, bt);

		if (BT_REFERENCE_FOUND(bt))
			return;

		switch (status = alpha_trace_status(req, bt))
		{
		case ALPHA_CONTINUE_TRACE:
			/* normal frame: pop ra from the stack and advance
			   sp by the frame size */
			alpha_frame_offset(req, 0);
			if (!req->value) {
				done = TRUE;
				break;
			}
			req->prevpc = req->pc;
			req->pc = GET_STACK_ULONG(req->sp);
			req->prevsp = req->sp;
			req->sp += req->value;
			break;

		case ALPHA_END_OF_TRACE:
			done = TRUE;
			break;

		case ALPHA_STRACE:
			alpha_exception_frame(req->sp,
				BT_USER_EFRAME|BT_STRACE, req, bt);
			done = TRUE;
			break;

		case ALPHA_RET_FROM_SMP_FORK:
			alpha_exception_frame(USER_EFRAME_ADDR(req->task),
				BT_USER_EFRAME|BT_RET_FROM_SMP_FORK, req, bt);
			done = TRUE;
			break;

		case
		    ALPHA_DOWN_FAILED:
			/* semaphore-sleep stub: pc/sp were already fixed up
			   by alpha_trace_status() */
			frame++;
			alpha_print_stack_entry(req, req->pc,
				closest_symbol(req->pc),
				bt->flags | BT_SAVE_LASTSP, bt);
			if (BT_REFERENCE_FOUND(bt))
				return;
			alpha_frame_offset(req, 0);
			if (!req->value) {
				done = TRUE;
				break;
			}
			req->prevpc = req->pc;
			req->pc = GET_STACK_ULONG(req->sp);
			req->prevsp = req->sp;
			req->sp += req->value;
			break;

		case ALPHA_RESCHEDULE:
			alpha_exception_frame(USER_EFRAME_ADDR(req->task),
				BT_USER_EFRAME|BT_RESCHEDULE, req, bt);
			done = TRUE;
			break;

		case ALPHA_MM_FAULT:
			/* page-fault entry: dump the eframe, then resume
			   the trace at its saved pc */
			alpha_exception_frame(req->sp, bt->flags, req, bt);
			if (!IS_KVADDR(req->pc)) {
				done = TRUE;
				break;
			}
			alpha_frame_offset(req, 0);
			if (!req->value) {
				done = TRUE;
				break;
			}
			frame++;
			alpha_print_stack_entry(req, req->pc,
				closest_symbol(req->pc),
				bt->flags | BT_SAVE_LASTSP, bt);
			if (BT_REFERENCE_FOUND(bt))
				return;
			if (!IS_KVADDR(req->pc)) {
				done = TRUE;
				break;
			}
			req->prevpc = req->pc;
			req->pc = GET_STACK_ULONG(req->sp);
			req->prevsp = req->sp;
			req->sp += req->value;
			break;

		case ALPHA_SYSCALL_FRAME:
			req->sp = verify_user_eframe(bt, req->task, req->sp) ?
				req->sp : USER_EFRAME_ADDR(req->task);
			alpha_exception_frame(req->sp, bt->flags, req, bt);
			if (!IS_KVADDR(req->pc)) {
				done = TRUE;
				break;
			}
			alpha_frame_offset(req, 0);
			if (!req->value) {
				done = TRUE;
				break;
			}
			req->prevpc = req->pc;
			req->pc = GET_STACK_ULONG(req->sp);
			req->prevsp = req->sp;
			req->sp += req->value;
			break;

		case ALPHA_SIGNAL_RETURN:
			alpha_exception_frame(USER_EFRAME_ADDR(req->task),
				bt->flags, req, bt);
			done = TRUE;
			break;

		case ALPHA_EXCEPTION_FRAME:
			alpha_frame_offset(req, 0);
			if (!req->value) {
				fprintf(fp,
			"ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n",
					req->pc,
					value_to_symstr(req->pc, buf, 0));
				done = TRUE;
				break;
			}
			alpha_exception_frame(req->sp + req->value,
				bt->flags, req, bt);
			if (!IS_KVADDR(req->pc)) {
				done = TRUE;
				break;
			}
			alpha_frame_offset(req, 0);
			if (!req->value) {
				fprintf(fp,
			"ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n",
					req->pc,
					value_to_symstr(req->pc, buf, 0));
				done = TRUE;
				break;
			}
			eframe_same_pc_ra_function =
				SAME_FUNCTION(req->pc, req->ra);
			frame++;
			alpha_print_stack_entry(req, req->pc,
				closest_symbol(req->pc),
				bt->flags | BT_SAVE_LASTSP, bt);
			if (BT_REFERENCE_FOUND(bt))
				return;
			if (!IS_KVADDR(req->pc)) {
				done = TRUE;
				break;
			}
			if (STREQ(closest_symbol(req->pc),
			    "ret_from_reschedule")) {
				alpha_exception_frame(
					USER_EFRAME_ADDR(req->task),
					BT_USER_EFRAME|BT_RESCHEDULE,
					req, bt);
				done = TRUE;
				break;
			}
			req->prevpc = req->pc;
			req->pc = GET_STACK_ULONG(req->sp);
			if (!is_kernel_text(req->pc)) {
				if (alpha_backtrace_resync(req,
				    bt->flags | BT_FROM_EXCEPTION, bt))
					break;
				if (BT_REFERENCE_FOUND(bt))
					return;
				ALPHA_BACKTRACE_SPECULATE(3);
			}
			/* prefer ra when pc and ra are in different
			   functions */
			if (!eframe_same_pc_ra_function &&
			    (req->pc != req->ra)) {
				req->pc = req->ra;
				break;
			}
			req->prevsp = req->sp;
			req->sp += req->value;
			break;

		case ALPHA_INTERRUPT_PENDING:
			alpha_frame_offset(req, 0);
			if (!req->value) {
				req->prevpc = req->pc;
				req->pc = req->addr;
				req->prevsp = req->sp;
				req->sp = req->frame;
			} else {
				req->prevpc = req->pc;
				req->pc = GET_STACK_ULONG(req->sp);
				req->prevsp = req->sp;
				req->sp += req->value;
			}
			break;
		}
	}

	return;

show_remaining_text:

	/* the trace could not be resynced: dump any remaining text
	   symbols found on the stack, then the user-entry eframe */
	if (BT_REFERENCE_CHECK(bt))
		return;

	BZERO(btloc, sizeof(struct bt_info));
	btloc->task = req->task;
	btloc->tc = bt->tc;
	btloc->stackbase = bt->stackbase;
	btloc->stacktop = bt->stacktop;
	btloc->flags = BT_TEXT_SYMBOLS_NOPRINT;
	hook.esp = req->lastsp + sizeof(long);
	btloc->hp = &hook;

	back_trace(btloc);

	if (hook.eip) {
		fprintf(fp,
"NOTE: cannot resolve trace from this point -- remaining text symbols on stack:\n");
		btloc->flags = BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK;
		hook.esp = req->lastsp + sizeof(long);
		back_trace(btloc);
	} else
		fprintf(fp,
"NOTE: cannot resolve trace from this point -- no remaining text symbols\n");

	if (CRASHDEBUG(1))
		fprintf(fp, "speculate_location: %d\n", speculate_location);

	alpha_exception_frame(USER_EFRAME_ADDR(req->task),
		BT_USER_EFRAME, req, bt);
}

/*
 *  print one entry of a stack trace
 */
static void
alpha_print_stack_entry(struct gnu_request *req, ulong callpc, char *name,
	ulong flags, struct bt_info *bt)
{
	struct load_module *lm;

	if (BT_REFERENCE_CHECK(bt)) {
		/* reference mode: just record whether the sought symbol
		   or address appears in the trace */
		switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL))
		{
		case BT_REF_SYMBOL:
			if (STREQ(name, bt->ref->str) ||
			    (STREQ(name, "strace") &&
			    STREQ(bt->ref->str, "entSys"))) {
				bt->ref->cmdflags |= BT_REF_FOUND;
			}
			break;
		case BT_REF_HEXVAL:
			if (bt->ref->hexval == callpc)
				bt->ref->cmdflags |= BT_REF_FOUND;
			break;
		}
	} else {
		fprintf(fp, "%s#%d [%lx] %s at %lx",
			req->curframe < 10 ? " " : "",
			req->curframe, req->sp,
			STREQ(name, "strace") ?
"strace (via entSys)" : name, callpc); if (module_symbol(callpc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); } if (!(flags & BT_SPECULATE)) req->curframe++; if (flags & BT_SAVE_LASTSP) req->lastsp = req->sp; if (BT_REFERENCE_CHECK(bt)) return; if (flags & BT_LINE_NUMBERS) alpha_dump_line_number(name, callpc); } static const char *hook_files[] = { "arch/alpha/kernel/entry.S", "arch/alpha/kernel/head.S", "init/main.c", "arch/alpha/kernel/smp.c", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define MAIN_C ((char **)&hook_files[2]) #define SMP_C ((char **)&hook_files[3]) static struct line_number_hook alpha_line_number_hooks[] = { {"entInt", ENTRY_S}, {"entMM", ENTRY_S}, {"entArith", ENTRY_S}, {"entIF", ENTRY_S}, {"entDbg", ENTRY_S}, {"kernel_clone", ENTRY_S}, {"kernel_thread", ENTRY_S}, {"__kernel_execve", ENTRY_S}, {"do_switch_stack", ENTRY_S}, {"undo_switch_stack", ENTRY_S}, {"entUna", ENTRY_S}, {"entUnaUser", ENTRY_S}, {"sys_fork", ENTRY_S}, {"sys_clone", ENTRY_S}, {"sys_vfork", ENTRY_S}, {"alpha_switch_to", ENTRY_S}, {"entSys", ENTRY_S}, {"ret_from_sys_call", ENTRY_S}, {"ret_from_reschedule", ENTRY_S}, {"restore_all", ENTRY_S}, {"strace", ENTRY_S}, {"strace_success", ENTRY_S}, {"strace_error", ENTRY_S}, {"syscall_error", ENTRY_S}, {"ret_success", ENTRY_S}, {"signal_return", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"reschedule", ENTRY_S}, {"sys_sigreturn", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"sys_sigsuspend", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"ret_from_smpfork", ENTRY_S}, {"_stext", HEAD_S}, {"__start", HEAD_S}, {"__smp_callin", HEAD_S}, {"cserve_ena", HEAD_S}, {"cserve_dis", HEAD_S}, {"halt", HEAD_S}, {"start_kernel", MAIN_C}, {"smp_callin", SMP_C}, {NULL, NULL} /* list must be NULL-terminated */ }; static void alpha_dump_line_number(char *name, ulong callpc) { char buf[BUFSIZE], *p; int retries; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if 
		    (retries) {
			/* retried with the function start: strip the
			   now-misleading line-number suffix */
			p = strstr(buf, ": ");
			if (p)
				*p = NULLCHAR;
		}
		fprintf(fp, " %s\n", buf);
	} else {
		if (retries)
			fprintf(fp, GDB_PATCHED() ?
			  "" : " (cannot determine file and line number)\n");
		else {
			retries++;
			callpc = closest_symbol_value(callpc);
			goto try_closest;
		}
	}
}

/*
 *  Look for the frame size storage at the beginning of a function.
 *  If it's not obvious, try gdb.
 *
 *  For future reference, here's where the numbers come from:
 *
 *  0xfffffc00003217e8 :   subq sp,0x50,sp
 *  fffffc00003217e8:       43ca153e
 *  010000 11110 01010000 1 0101001 11110
 *
 *  0xfffffc0000321668 :   subq sp,0x60,sp
 *  fffffc0000321668:       43cc153e
 *  010000 11110 01100000 1 0101001 11110
 *
 *  0xfffffc000035d028 :   subq sp,0x70,sp
 *  fffffc000035d028:       43ce153e
 *  010000 11110 01110000 1 0101001 11110
 *
 *  0100 0011 110x xxxx xxx1 0101 0011 1110
 *  1111 1111 111x xxxx xxx1 1111 1111 1111
 *  0000 0000 0001 1111 1110 0000 0000 0000
 *     f    f    e    0    1    f    f    f  instruction mask
 *     0    0    1    f    e    0    0    0  offset
 *
 *  stq ra,0(sp)
 *  fffffc000035d034:       b75e0000
 */
static void
alpha_frame_offset(struct gnu_request *req, ulong alt_pc)
{
	uint *ip, ival;
	ulong value;

	req->value = value = 0;

	if (alt_pc && !is_kernel_text(alt_pc))
		error(FATAL,
		    "trying to get frame offset of non-text address: %lx\n",
			alt_pc);
	else if (!alt_pc && !is_kernel_text(req->pc))
		error(FATAL,
		    "trying to get frame offset of non-text address: %lx\n",
			req->pc);

	ip = alt_pc ?
	    (int *)closest_symbol_value(alt_pc) :
		(int *)closest_symbol_value(req->pc);
	if (!ip)
		goto use_gdb;

	ival = 0;

	/*
	 *  Don't go any farther than "stq ra,0(sp)" (0xb75e0000)
	 */
	while (ival != 0xb75e0000) {
		if (!text_value_cache((ulong)ip, 0, &ival)) {
			readmem((ulong)ip, KVADDR, &ival, sizeof(uint),
				"uncached text value", FAULT_ON_ERROR);
			text_value_cache((ulong)ip, ival, NULL);
		}
		/* match "subq sp,<n>,sp" and extract the literal <n> */
		if ((ival & 0xffe01fff) == 0x43c0153e) {
			value = (ival & 0x1fe000) >> 13;
			break;
		}
		ip++;
	}

	if (value) {
		req->value = value;
		return;
	}

use_gdb:
#ifndef GDB_5_3
{
	static int gdb_frame_offset_warnings = 10;

	if (gdb_frame_offset_warnings-- > 0)
		error(WARNING,
		  "GNU_ALPHA_FRAME_OFFSET functionality not ported to gdb\n");
}
#endif
	req->command = GNU_ALPHA_FRAME_OFFSET;

	if (alt_pc) {
		ulong pc_save;

		pc_save = req->pc;
		req->pc = alt_pc;
		gdb_interface(req);
		req->pc = pc_save;
	} else
		gdb_interface(req);
}

/*
 *  Look for key routines that either mean the trace has ended or has
 *  bumped into an exception frame.
 */
int
alpha_trace_status(struct gnu_request *req, struct bt_info *bt)
{
	ulong value;
	char *func;
	ulong frame;

	req->addr = 0;
	func = req->name;
	frame = req->sp;

	if (STREQ(func, "start_kernel") ||
	    STREQ(func, "smp_callin") ||
	    STREQ(func, "kernel_thread") ||
	    STREQ(func, "__kernel_thread"))
		return ALPHA_END_OF_TRACE;

	if (STREQ(func, "ret_from_smp_fork") ||
	    STREQ(func, "ret_from_smpfork"))
		return ALPHA_RET_FROM_SMP_FORK;

	if (STREQ(func, "entSys"))
		return ALPHA_SYSCALL_FRAME;

	if (STREQ(func, "entMM")) {
		req->sp += 56;   /* see entMM in entry.S */
		return ALPHA_MM_FAULT;
	}

	if (STREQ(func, "do_entInt"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "do_entArith"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "do_entIF"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "do_entDbg"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "handle_bottom_half"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "handle_softirq"))
		return ALPHA_EXCEPTION_FRAME;

	if (STREQ(func, "reschedule"))
		return ALPHA_RESCHEDULE;

	if (STREQ(func,
"ret_from_reschedule")) return ALPHA_RESCHEDULE; if (STREQ(func, "signal_return")) return ALPHA_SIGNAL_RETURN; if (STREQ(func, "strace")) return ALPHA_STRACE; if (STREQ(func, "__down_failed") || STREQ(func, "__down_failed_interruptible")) { readmem(req->sp + 144, KVADDR, &req->pc, sizeof(ulong), "__down_failed r26", FAULT_ON_ERROR); req->sp += 160; return ALPHA_DOWN_FAILED; } value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "do_entInt") || STREQ(closest_symbol(value), "do_entArith") || STREQ(closest_symbol(value), "do_entIF") || STREQ(closest_symbol(value), "do_entDbg")) { req->addr = value; req->frame = 0; while (INSTACK(frame, bt)) { frame += sizeof(ulong); value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "ret_from_sys_call")) { alpha_frame_offset(req, req->addr); /* req->frame = frame + req->value; XXX */ break; } } return ALPHA_INTERRUPT_PENDING; } return ALPHA_CONTINUE_TRACE; } /* * Redo the gdb pt_regs structure output. */ enum regnames { _r0_, _r1_, _r2_, _r3_, _r4_, _r5_, _r6_, _r7_, _r8_, _r19_, _r20_, _r21_, _r22_, _r23_, _r24_, _r25_, _r26_, _r27_, _r28_, _hae_, _trap_a0_, _trap_a1_, _trap_a2_, _ps_, _pc_, _gp_, _r16_, _r17_, _r18_, NUMREGS}; struct alpha_eframe { char regs[30][30]; ulong value[29]; }; static void alpha_exception_frame(ulong addr, ulong flags, struct gnu_request *req, struct bt_info *bt) { int i, j; char buf[BUFSIZE]; ulong value; physaddr_t paddr; struct alpha_eframe eframe; if (CRASHDEBUG(4)) fprintf(fp, "alpha_exception_frame: %lx\n", addr); if (flags & BT_SPECULATE) { req->pc = 0; fprintf(fp, "ALPHA EXCEPTION FRAME\n"); return; } BZERO(&eframe, sizeof(struct alpha_eframe)); open_tmpfile(); dump_struct("pt_regs", addr, RADIX(16)); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { strip_comma(clean_line(buf)); if (!strstr(buf, "0x")) continue; extract_hex(buf, &value, NULLCHAR, TRUE); if (CRASHDEBUG(4)) fprintf(pc->saved_fp, "<%s> %lx\n", buf, value); if (STRNEQ(buf, "r0 = ")) { 
sprintf(eframe.regs[_r0_], " V0/R0: %016lx", value); eframe.value[_r0_] = value; } if (STRNEQ(buf, "r1 = ")) { sprintf(eframe.regs[_r1_], " T0/R1: %016lx", value); eframe.value[_r1_] = value; } if (STRNEQ(buf, "r2 = ")) { sprintf(eframe.regs[_r2_], " T1/R2: %016lx", value); eframe.value[_r2_] = value; } if (STRNEQ(buf, "r3 = ")) { sprintf(eframe.regs[_r3_], " T2/R3: %016lx", value); eframe.value[_r3_] = value; } if (STRNEQ(buf, "r4 = ")) { sprintf(eframe.regs[_r4_], " T3/R4: %016lx", value); eframe.value[_r4_] = value; } if (STRNEQ(buf, "r5 = ")) { sprintf(eframe.regs[_r5_], " T4/R5: %016lx", value); eframe.value[_r5_] = value; } if (STRNEQ(buf, "r6 = ")) { sprintf(eframe.regs[_r6_], " T5/R6: %016lx", value); eframe.value[_r6_] = value; } if (STRNEQ(buf, "r7 = ")) { sprintf(eframe.regs[_r7_], " T6/R7: %016lx", value); eframe.value[_r7_] = value; } if (STRNEQ(buf, "r8 = ")) { sprintf(eframe.regs[_r8_], " T7/R8: %016lx", value); eframe.value[_r8_] = value; } if (STRNEQ(buf, "r19 = ")) { sprintf(eframe.regs[_r19_], " A3/R19: %016lx", value); eframe.value[_r19_] = value; } if (STRNEQ(buf, "r20 = ")) { sprintf(eframe.regs[_r20_], " A4/R20: %016lx", value); eframe.value[_r20_] = value; } if (STRNEQ(buf, "r21 = ")) { sprintf(eframe.regs[_r21_], " A5/R21: %016lx", value); eframe.value[_r21_] = value; } if (STRNEQ(buf, "r22 = ")) { sprintf(eframe.regs[_r22_], " T8/R22: %016lx", value); eframe.value[_r22_] = value; } if (STRNEQ(buf, "r23 = ")) { sprintf(eframe.regs[_r23_], " T9/R23: %016lx", value); eframe.value[_r23_] = value; } if (STRNEQ(buf, "r24 = ")) { sprintf(eframe.regs[_r24_], "T10/R24: %016lx", value); eframe.value[_r24_] = value; } if (STRNEQ(buf, "r25 = ")) { sprintf(eframe.regs[_r25_], "T11/R25: %016lx", value); eframe.value[_r25_] = value; } if (STRNEQ(buf, "r26 = ")) { sprintf(eframe.regs[_r26_], " RA/R26: %016lx", value); eframe.value[_r26_] = value; } if (STRNEQ(buf, "r27 = ")) { sprintf(eframe.regs[_r27_], "T12/R27: %016lx", value); eframe.value[_r27_] = 
value; } if (STRNEQ(buf, "r28 = ")) { sprintf(eframe.regs[_r28_], " AT/R28: %016lx", value); eframe.value[_r28_] = value; } if (STRNEQ(buf, "hae = ")) { sprintf(eframe.regs[_hae_], " HAE: %016lx", value); eframe.value[_hae_] = value; } if (STRNEQ(buf, "trap_a0 = ")) { sprintf(eframe.regs[_trap_a0_], "TRAP_A0: %016lx", value); eframe.value[_trap_a0_] = value; } if (STRNEQ(buf, "trap_a1 = ")) { sprintf(eframe.regs[_trap_a1_], "TRAP_A1: %016lx", value); eframe.value[_trap_a1_] = value; } if (STRNEQ(buf, "trap_a2 = ")) { sprintf(eframe.regs[_trap_a2_], "TRAP_A2: %016lx", value); eframe.value[_trap_a2_] = value; } if (STRNEQ(buf, "ps = ")) { sprintf(eframe.regs[_ps_], " PS: %016lx", value); eframe.value[_ps_] = value; } if (STRNEQ(buf, "pc = ")) { sprintf(eframe.regs[_pc_], " PC: %016lx", value); eframe.value[_pc_] = value; } if (STRNEQ(buf, "gp = ")) { sprintf(eframe.regs[_gp_], " GP/R29: %016lx", value); eframe.value[_gp_] = value; } if (STRNEQ(buf, "r16 = ")) { sprintf(eframe.regs[_r16_], " A0/R16: %016lx", value); eframe.value[_r16_] = value; } if (STRNEQ(buf, "r17 = ")) { sprintf(eframe.regs[_r17_], " A1/R17: %016lx", value); eframe.value[_r17_] = value; } if (STRNEQ(buf, "r18 =")) { sprintf(eframe.regs[_r18_], " A2/R18: %016lx", value); eframe.value[_r18_] = value; } } close_tmpfile(); if ((flags & BT_EXCEPTION_FRAME) && !BT_REFERENCE_CHECK(bt)) { dump_eframe: fprintf(fp, " EFRAME: %lx ", addr); fprintf(fp, "%s\n", eframe.regs[_r24_]); for (i = 0; i < (((NUMREGS+1)/2)-1); i++) { fprintf(fp, "%s ", eframe.regs[i]); pad_line(fp, 21 - strlen(eframe.regs[i]), ' '); j = i+((NUMREGS+1)/2); fprintf(fp, "%s", eframe.regs[j]); if (((j == _pc_) || (j == _r26_)) && is_kernel_text(eframe.value[j])) fprintf(fp, " <%s>", value_to_symstr(eframe.value[j], buf, 0)); fprintf(fp, "\n"); } } req->ra = eframe.value[_r26_]; req->pc = eframe.value[_pc_]; req->sp = addr + (29 * sizeof(ulong)); if (flags & BT_USER_EFRAME) { flags &= ~BT_USER_EFRAME; if (!BT_REFERENCE_CHECK(bt) && 
(eframe.value[_ps_] == 8) && (((uvtop(task_to_context(req->task), req->pc, &paddr, 0) || (volatile ulong)paddr) && (uvtop(task_to_context(req->task), req->ra, &paddr, 0) || (volatile ulong)paddr)) || (IS_ZOMBIE(req->task) || IS_EXITING(req->task)))) { if (!(flags & (BT_RESCHEDULE|BT_RET_FROM_SMP_FORK|BT_STRACE))) fprintf(fp, "NOTE: kernel-entry exception frame:\n"); goto dump_eframe; } } } /* * Look for likely exception frames in a stack. */ struct alpha_pt_regs { ulong reg_value[NUMREGS]; }; static int alpha_eframe_search(struct bt_info *bt) { ulong *first, *last; ulong eframe; struct alpha_pt_regs *pt; struct gnu_request *req; /* needed for alpha_exception_frame */ ulong *stack; int cnt; stack = (ulong *)bt->stackbuf; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->task = bt->task; first = stack + (roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)); last = stack + (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) / sizeof(ulong)); for (cnt = 0; first <= last; first++) { pt = (struct alpha_pt_regs *)first; /* check for kernel exception frame */ if (!(pt->reg_value[_ps_] & 0xfffffffffffffff8) && (is_kernel_text(pt->reg_value[_pc_]) || IS_MODULE_VADDR(pt->reg_value[_pc_])) && (is_kernel_text(pt->reg_value[_r26_]) || IS_MODULE_VADDR(pt->reg_value[_r26_])) && IS_KVADDR(pt->reg_value[_gp_])) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); continue; } /* check for user exception frame */ if ((pt->reg_value[_ps_] == 0x8) && ((IN_TASK_VMA(bt->task, pt->reg_value[_pc_]) && IN_TASK_VMA(bt->task, pt->reg_value[_r26_]) && IS_UVADDR(pt->reg_value[_gp_], bt->tc)) || ((first == last) && (IS_ZOMBIE(bt->task) || IS_EXITING(bt->task))))) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nUSER-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); 
alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); } } FREEBUF(req); return cnt; } /* * Before dumping a nonsensical exception frame, give it a quick test. */ static int verify_user_eframe(struct bt_info *bt, ulong task, ulong sp) { struct alpha_pt_regs ptbuf, *pt; readmem(sp, KVADDR, &ptbuf, sizeof(struct alpha_pt_regs), "pt_regs", FAULT_ON_ERROR); pt = &ptbuf; if ((pt->reg_value[_ps_] == 0x8) && ((IN_TASK_VMA(task, pt->reg_value[_pc_]) && IN_TASK_VMA(task, pt->reg_value[_r26_]) && IS_UVADDR(pt->reg_value[_gp_], bt->tc)) || ((pt == (struct alpha_pt_regs *)USER_EFRAME_ADDR(task)) && (IS_ZOMBIE(task) || IS_EXITING(task))))) { return TRUE; } return FALSE; } /* * Try to resync the stack location when there is no valid stack frame, * typically just above an exception frame. Use the req->ra value from the * exception frame as the new starting req->pc. Then walk up the stack until * a text routine that calls the newly-assigned pc is found -- that stack * location then becomes the new req->sp. * * If we're not coming from an exception frame, req-ra and req->pc will be * purposely zeroed out. In that case, use the prevsp value to find the * first pc that called the last frame's pc. * * Add any other repeatable "special-case" frames to the beginning of this * routine (ex. debug_spin_lock). Last ditch -- at the end of this routine, * speculate what might have happened (possibly in the background) -- and * if it looks good, run with it. 
 */
static int
alpha_backtrace_resync(struct gnu_request *req, ulong flags,
	struct bt_info *bt)
{
	char addr[BUFSIZE];
	char buf[BUFSIZE];
	char lookfor1[BUFSIZE];
	char lookfor2[BUFSIZE];
	ulong newpc;
	ulong *stkp;
	ulong *stkp_newpc, *stkp_next;
	ulong value;
	int found;
	char *name;
	int exception;

	if (CRASHDEBUG(1))
		fprintf(fp, "RESYNC1: [%lx-%d] ra: %lx pc: %lx sp: %lx\n",
			flags, req->curframe, req->ra, req->pc, req->sp);

	/* zeroed ra/pc means "not coming from an exception frame" --
	   restart from the previous frame's pc */
	if (!req->ra && !req->pc) {
		req->ra = req->prevpc;
		exception = FALSE;
	} else
		exception = TRUE;

	if (!IS_KVADDR(req->ra))
		return FALSE;

	/* disassembly lines referencing ra's function look like
	   "<name>" or "<name+offset" */
	name = closest_symbol(req->ra);
	sprintf(lookfor1, "<%s>", name);
	sprintf(lookfor2, "<%s+", name);

	if (CRASHDEBUG(1))
		fprintf(fp, "RESYNC2: exception: %s lookfor: %s or %s\n",
			exception ? "TRUE" : "FALSE", lookfor1, lookfor2);

	/*
	 * This is common when a non-panicking active CPU is spinning
	 * in debug_spin_lock().  The next pc is offset by 0x30 from
	 * the top of the exception frame, and the next sp is equal
	 * to the frame offset of debug_spin_lock().  I can't explain it...
	 */
	if ((flags & BT_FROM_EXCEPTION) && STREQ(name, "debug_spin_lock")) {
		alpha_print_stack_entry(req, req->ra,
			closest_symbol(req->ra), flags, bt);
		if (BT_REFERENCE_FOUND(bt))
			return FALSE;
		alpha_frame_offset(req, req->ra);
		stkp = (ulong *)(req->sp + 0x30);
		value = GET_STACK_ULONG(stkp);
		if (!is_kernel_text(value)) {
			req->sp = req->prevsp;
			return FALSE;
		}
		req->pc = value;
		req->sp += req->value;
		return TRUE;
	}

	/*
	 * If the ra is a system call, then all we should have to do is
	 * find the next reference to entSys on the stack, and set the
	 * sp to that value.
	 */
	if (is_system_call(name, 0)) {
		/* stkp = (ulong *)req->sp; */
		stkp = (ulong *)req->prevsp;
		for (stkp++; INSTACK(stkp, bt); stkp++) {
			value = GET_STACK_ULONG(stkp);
			if (IS_KVADDR(value) && is_kernel_text(value)) {
				if (STREQ(closest_symbol(value), "entSys")) {
					req->pc = value;
					req->sp = USER_EFRAME_ADDR(req->task);
					return TRUE;
				}
			}
		}
	}

	/*
	 * Just find the next location containing text. (?)
	 */
	if (STREQ(name, "do_coredump")) {
		stkp = (ulong *)(req->sp + sizeof(long));
		for (stkp++; INSTACK(stkp, bt); stkp++) {
			value = GET_STACK_ULONG(stkp);
			if (IS_KVADDR(value) && is_kernel_text(value)) {
				req->pc = req->ra;
				req->sp = (ulong)stkp;
				return TRUE;
			}
		}
	}

	if (flags & BT_SPECULATE)
		return FALSE;

	if (CRASHDEBUG(1)) {
		fprintf(fp, "RESYNC3: prevsp: %lx ra: %lx name: %s\n",
			req->prevsp, req->ra, name);
		fprintf(fp, "RESYNC3: prevpc: %lx\n", req->prevpc);
	}

	stkp_newpc = stkp_next = 0;
	newpc = 0;
	found = FALSE;

	if (exception) {
		newpc = req->ra;
		stkp = (ulong *)req->sp;
	} else
		stkp = (ulong *)req->prevsp;

	if (CRASHDEBUG(1))
		fprintf(fp, "RESYNC4: stkp: %lx newpc: %lx\n",
			(ulong)stkp, newpc);

	/*
	 * General case: walk up the stack; for each kernel-text word,
	 * disassemble the instruction just before it and accept the
	 * location if it is a jsr/bsr into ra's function.
	 */
	for (stkp++; INSTACK(stkp, bt); stkp++) {
		value = GET_STACK_ULONG(stkp);
		/*
		 * First find the new pc on the stack.
		 */
		if (!found) {
			if (!exception && is_kernel_text(value)) {
				found = TRUE;
			} else if (value == newpc) {
				found = TRUE;
				stkp_newpc = stkp;
				continue;
			}
		}
		if (!IS_KVADDR(value))
			continue;
		if (is_kernel_text(value)) {
			if (!stkp_next)
				stkp_next = stkp;
			if (CRASHDEBUG(2)) {
				fprintf(fp, "RESYNC6: disassemble %lx (%s)\n",
					value - sizeof(uint),
					value_to_symstr(value - sizeof(uint),
					buf, 0));
			}
			req->command = GNU_DISASSEMBLE;
			/* the call site is one instruction before the
			   saved return address */
			req->addr = value - sizeof(uint);
			sprintf(addr, "0x%lx", req->addr);

			open_tmpfile();
			req->fp = pc->tmpfile;
			gdb_interface(req);
			rewind(pc->tmpfile);
			while (fgets(buf, BUFSIZE, pc->tmpfile)) {
				clean_line(buf);
				if (STRNEQ(buf, "Dump of") ||
				    STRNEQ(buf, "End of"))
					continue;
				if (STRNEQ(buf, addr)) {
					if (LASTCHAR(buf) == ':') {
						fgets(buf, BUFSIZE,
							pc->tmpfile);
						clean_line(buf);
					}
					if (CRASHDEBUG(2) &&
					    (strstr(buf, "jsr") ||
					    strstr(buf, "bsr")))
						fprintf(pc->saved_fp,
							"%s\n", buf);
					if ((strstr(buf, "jsr") ||
					    strstr(buf, "bsr")) &&
					    (strstr(buf, lookfor1) ||
					    strstr(buf, lookfor2))) {
						if (exception) {
							req->pc = newpc;
							req->sp =
							    (ulong)stkp;
						} else
							req->pc = req->addr;
						close_tmpfile();
						return TRUE;
					}
				}
			}
			close_tmpfile();
		}
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "RESYNC9: [%d] name: %s pc: %lx ra: %lx\n",
			req->curframe, name, req->pc, req->ra);
		fprintf(fp, "RESYNC9: sp: %lx lastsp: %lx\n",
			req->sp, req->lastsp);
		fprintf(fp, "RESYNC9: prevpc: %lx prevsp: %lx\n",
			req->prevpc, req->prevsp);
	}

	/*
	 * At this point, all we can do is speculate based upon
	 * past experiences...
	 */
	return (alpha_resync_speculate(req, flags, bt));
}

/*
 *  Try one level of speculation.  If it works, fine -- if not, give up.
 *
 *  A candidate ra/sp pair is first derived from a handful of empirically
 *  "known" situations, then validated by running a throw-away BT_SPECULATE
 *  backtrace into a tmpfile and checking that it terminates sensibly.
 */
static int
alpha_resync_speculate(struct gnu_request *req, ulong flags,
	struct bt_info *bt)
{
	ulong *stkp;
	ulong value;
	ulong found_sp, found_ra;
	struct stack_hook hook;
	struct bt_info bt_info, *btloc;
	char buf[BUFSIZE];
	int kernel_thread;
	int looks_good;

	if (flags & BT_SPECULATE)   /* already been here on this trace... */
		return FALSE;

	if (pc->tmpfile)            /* a tmpfile is already in use */
		return FALSE;

	found_ra = found_sp = 0;
	kernel_thread = is_kernel_thread(req->task);

	/*
	 * Add "known" possibilities here.
	 */
	switch (flags & (BT_FROM_EXCEPTION|BT_FROM_CALLFRAME))
	{
	case BT_FROM_EXCEPTION:
		if (STREQ(closest_symbol(req->prevpc), "read_lock") ||
		    STREQ(closest_symbol(req->ra), "do_select") ||
		    STREQ(closest_symbol(req->ra), "schedule")) {
			stkp = (ulong *)req->sp;
			for (stkp++; INSTACK(stkp, bt); stkp++) {
				value = GET_STACK_ULONG(stkp);
				if (found_ra) {
					if (is_kernel_text_offset(value)) {
						found_sp = (ulong)stkp;
						break;
					}
					continue;
				}
				if (value == req->ra)
					found_ra = value;
			}
		}
		break;

	case BT_FROM_CALLFRAME:
		if (STREQ(closest_symbol(req->ra), "sys_read")) {
			/* NOTE(review): the entSys frame is assumed to sit
			   32 bytes below prevsp -- empirical offset */
			value = GET_STACK_ULONG(req->prevsp - 32);
			if (STREQ(closest_symbol(value), "entSys")) {
				found_ra = value;
				found_sp = req->prevsp - 32;
			}
		} else if (STREQ(closest_symbol(req->ra),
			"exit_autofs4_fs")) {
			stkp = (ulong *)req->sp;
			for (stkp++; INSTACK(stkp, bt); stkp++) {
				value = GET_STACK_ULONG(stkp);
				if (found_ra && (value != found_ra)) {
					if (is_kernel_text_offset(value)) {
						found_sp = (ulong)stkp;
						break;
					}
					continue;
				}
				if (is_kernel_text_offset(value))
					found_ra = value;
			}
		}
		break;

	default:
		if (req->hookp &&
		    STREQ(closest_symbol(req->prevpc), "filemap_nopage") &&
		    !STREQ(closest_symbol(req->hookp->eip), "do_no_page")) {
			found_ra = found_sp = 0;
			stkp = (ulong *)req->prevsp;
			for (stkp++; INSTACK(stkp, bt); stkp++) {
				value = GET_STACK_ULONG(stkp);
				if (found_ra && (value != found_ra)) {
					if (is_kernel_text_offset(value)) {
						found_sp = (ulong)stkp;
						break;
					}
					continue;
				}
				if (is_kernel_text_offset(value) &&
				    STREQ(closest_symbol(value),
				    "do_no_page"))
					found_ra = value;
			}

			if (found_ra && found_sp) {
				req->hookp->eip = found_ra;
				req->hookp->esp = found_sp;
				return TRUE;
			}
		}

		if (req->hookp) {
			found_ra = req->hookp->eip;
			found_sp = req->hookp->esp;
		}
		break;
	}

	/* validate the candidate by running a speculative backtrace and
	   eyeballing its output */
	if (found_ra && found_sp) {
		looks_good = FALSE;
		hook.esp = found_sp;
		hook.eip = found_ra;

		if (CRASHDEBUG(1))
			fprintf(pc->saved_fp,
			    "----- RESYNC SPECULATE START -----\n");

		open_tmpfile();
		btloc = &bt_info;
		BZERO(btloc, sizeof(struct bt_info));
		btloc->task = req->task;
		btloc->tc = bt->tc;
		btloc->stackbase = bt->stackbase;
		btloc->stacktop = bt->stacktop;
		btloc->flags = BT_SPECULATE;
		btloc->hp = &hook;

		back_trace(btloc);

		rewind(pc->tmpfile);
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			if (CRASHDEBUG(1))
				fprintf(pc->saved_fp, "%s", buf);

			if (strstr(buf, "NOTE: cannot resolve")) {
				looks_good = FALSE;
				break;
			}

			if (strstr(buf, "ALPHA EXCEPTION FRAME")) {
				looks_good = TRUE;
				break;
			}

			if (kernel_thread) {
				if (strstr(buf, " kernel_thread ") ||
				    strstr(buf, " __kernel_thread ") ||
				    strstr(buf, " start_kernel ") ||
				    strstr(buf, " smp_callin ")) {
					looks_good = TRUE;
					break;
				}
			}
		}
		close_tmpfile();

		if (CRASHDEBUG(1))
			fprintf(pc->saved_fp,
			    "----- RESYNC SPECULATE DONE ------\n");

		if (looks_good) {
			req->pc = found_ra;
			req->sp = found_sp;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 *  Translates a user virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 *
 *  This routine can also take mapped kernel virtual addresses if the -u flag
 *  was passed to cmd_vtop().
If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. */ static int alpha_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { pgd = (ulong *)machdep->get_task_pgd(tc->task); } else { if (!tc->mm_struct) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte & _PAGE_VALID)) goto no_upage; page_middle = (ulong *) (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + ((vaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte & _PAGE_VALID)) goto no_upage; page_table = (ulong *) (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + (BTOP(vaddr) & (PTRS_PER_PAGE - 1)); FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_VALID))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); alpha_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " 
PAGE: %lx\n\n", PAGEBASE(*paddr)); alpha_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. */ static int alpha_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; if (!IS_KVADDR(kvaddr)) return FALSE; if (!vt->vmalloc_start) { /* presume KSEG this early */ *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } pgd = (ulong *)vt->kernel_pgd[0]; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte & _PAGE_VALID)) goto no_kpage; page_middle = (ulong *) (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte & _PAGE_VALID)) goto no_kpage; page_table = (ulong *) (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + (BTOP(kvaddr) & (PTRS_PER_PAGE - 1)); FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_VALID))) { if (pte && verbose) { fprintf(fp, "\n"); alpha_translate_pte(pte, 0, 0); } goto no_kpage; } *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); 
alpha_translate_pte(pte, 0, 0); } return TRUE; no_kpage: return FALSE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong alpha_get_task_pgd(ulong task) { long offset; ulong ptbr; offset = OFFSET_OPTION(task_struct_thread, task_struct_tss); offset += OFFSET(thread_struct_ptbr); readmem(task + offset, KVADDR, &ptbr, sizeof(ulong), "task thread ptbr", FAULT_ON_ERROR); return(PTOV(PTOB(ptbr))); } /* * Calculate and return the speed of the processor. */ static ulong alpha_processor_speed(void) { ulong hwrpb; long offset; long cycle_freq; ulong mhz; if (machdep->mhz) return machdep->mhz; mhz = 0; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); offset = OFFSET(hwrpb_struct_cycle_freq); if (!hwrpb || (offset == -1) || !readmem(hwrpb+offset, KVADDR, &cycle_freq, sizeof(ulong), "hwrpb cycle_freq", RETURN_ON_ERROR)) return (machdep->mhz = mhz); mhz = cycle_freq/1000000; return (machdep->mhz = mhz); } void alpha_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & HWRESET) fprintf(fp, "%sHWRESET", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: alpha_eframe_search()\n"); fprintf(fp, " back_trace: alpha_back_trace_cmd()\n"); fprintf(fp, " processor_speed: alpha_processor_speed()\n"); fprintf(fp, " uvtop: alpha_uvtop()\n"); fprintf(fp, " kvtop: alpha_uvtop()\n"); fprintf(fp, " get_task_pgd: alpha_get_task_pgd()\n"); if (machdep->dump_irq == generic_dump_irq) fprintf(fp, " dump_irq: generic_dump_irq()\n"); else fprintf(fp, " dump_irq: alpha_dump_irq()\n"); fprintf(fp, " get_stack_frame: alpha_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: alpha_translate_pte()\n"); fprintf(fp, " memory_size: alpha_get_memory_size()\n"); fprintf(fp, " vmalloc_start: alpha_get_vmalloc_start()\n"); fprintf(fp, " is_task_addr: alpha_is_task_addr()\n"); fprintf(fp, " verify_symbol: alpha_verify_symbol()\n"); fprintf(fp, " dis_filter: alpha_dis_filter()\n"); fprintf(fp, " cmd_mach: alpha_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: alpha_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " 
line_number_hooks: alpha_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } /* * Fix up jsr's to show the right target. * * If a value is passed with no buf, then cmd_dis is fishing for whether * the GP can be calculated from the first couple of instructions of the * target routine: * * 0xfffffc0000349fa0 : ldah gp,35(t12) * 0xfffffc0000349fa4 : lda gp,6216(gp) * * If a buf pointer is passed, then check whether the t12 register * is being set up as an offset from gp, then calculate the target address: * * 0xfffffc000042c364 : ldq t12,-29336(gp) * 0xfffffc000042c368 : * jsr ra,(t12),0xfffffc0000429dc0 * * If the next instruction is a jsr ra,(t12), then correct the bracketed * target address translation. 
 *
 */

/* opcode+register patterns (upper 16 bits) of the instructions involved
   in GP setup and indirect calls; low 16 bits are the displacement */
#define LDAH_GP_T12	(0x27bb0000)
#define LDA_GP_GP	(0x23bd0000)
#define LDQ_T12_GP	(0xa77d0000)
#define JSR_RA_T12	(0x6b5b0000)

#define OPCODE_OPERAND_MASK	(0xffff0000)
#define OPCODE_MEM_DISP_MASK	(0x0000ffff)

/* persistent state carried between dis_filter calls for one function:
   the computed GP, the pending jsr target, and the owning function name */
static struct instruction_data {
	uint inst[2];
	short mem_disp[2];
	ulong gp;
	ulong target;
	char *curfunc;
} instruction_data = { {0} };

/*
 *  Disassembly-line filter.  Called first with buf == NULL to prime the
 *  GP calculation from the function's first two instructions; thereafter
 *  called per output line to track ldq t12,disp(gp) loads and patch the
 *  bracketed target of a following "jsr ra,(t12)".
 */
static int
alpha_dis_filter(ulong vaddr, char *buf, unsigned int output_radix)
{
	struct syment *sp;
	struct instruction_data *id;
	char buf2[BUFSIZE], *p1;

	id = &instruction_data;

	if (!buf) {
		BZERO(id, sizeof(struct instruction_data));

		if (!(sp = value_search(vaddr, NULL)))
			return FALSE;

		readmem(sp->value, KVADDR, &id->inst[0],
			sizeof(uint) * 2,
			"two instructions", FAULT_ON_ERROR);

		if (((id->inst[0] & OPCODE_OPERAND_MASK) == LDAH_GP_T12) &&
		    ((id->inst[1] & OPCODE_OPERAND_MASK) == LDA_GP_GP)) {
			id->mem_disp[0] =
			    (short)(id->inst[0] & OPCODE_MEM_DISP_MASK);
			id->mem_disp[1] =
			    (short)(id->inst[1] & OPCODE_MEM_DISP_MASK);
			/* gp = entry + (disp_hi << 16) + disp_lo */
			id->gp = sp->value + (65536*id->mem_disp[0]) +
			    id->mem_disp[1];
			id->curfunc = sp->name;

			if (CRASHDEBUG(1))
				console("%s: ldah(%d) and lda(%d) gp: %lx\n",
					id->curfunc, id->mem_disp[0],
					id->mem_disp[1], id->gp);
			return TRUE;
		}

		/* send all lines through the generic */
		/* dis_address_translation() filter   */
		return TRUE;
	}

	dis_address_translation(vaddr, buf, output_radix);

	/* lose our state when we wander out of the primed function */
	if (!id->gp || !(sp = value_search(vaddr, NULL)) ||
	    !STREQ(id->curfunc, sp->name)) {
		BZERO(id, sizeof(struct instruction_data));
		return FALSE;
	}

	readmem(vaddr, KVADDR, &id->inst[0], sizeof(uint),
		"one instruction", FAULT_ON_ERROR);

	if ((id->inst[0] & OPCODE_OPERAND_MASK) == JSR_RA_T12) {
		if (!id->target || !strstr(buf, "jsr\tra,(t12)") ||
		    !strstr(buf, "<"))
			return FALSE;
		/* rewrite the "0x... <symbol>" tail of the jsr line in
		   place with the tracked t12 target */
		p1 = strstr(strstr(buf, "jsr"), "0x");
		sprintf(p1, "0x%lx <%s>%s", id->target,
			value_to_symstr(id->target, buf2, output_radix),
			CRASHDEBUG(1) ? " [PATCHED]\n" : "\n");
		return TRUE;
	}

	if ((id->inst[0] & OPCODE_OPERAND_MASK) == LDQ_T12_GP) {
		/* remember the address t12 is being loaded with, for a
		   possible jsr on the next line */
		id->mem_disp[0] =
		    (short)(id->inst[0] & OPCODE_MEM_DISP_MASK);
		readmem(id->gp + id->mem_disp[0], KVADDR, &id->target,
			sizeof(ulong), "jsr target", FAULT_ON_ERROR);
	} else
		id->target = 0;

	return TRUE;
}

/*
 *  For some reason gdb can go off into the weeds translating text addresses,
 *  so this routine both fixes the references as well as imposing the current
 *  output radix on the translations.
 */
static void
dis_address_translation(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	/* re-render the "address <symbol>:" prefix of the line */
	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	/* if the line ends in "<symbol>", retranslate the ",0x..." operand
	   that precedes it */
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && (*p1 != ','))
			p1--;

		if (!STRNEQ(p1, ",0x"))
			return;

		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));

		sprintf(p1, "%s", buf1);
	}

	console(" %s", inbuf);
}

/*
 *  If we're generically-inclined, call generic_dump_irq().  Otherwise
 *  dump the IRQ table the old-fashioned way.
 */
/*
 *  Dump one irq_action chain for the given IRQ by parsing gdb's
 *  dump_struct("irqaction") output out of a tmpfile, following the
 *  "next" pointers via the do_linked_action label.
 */
static void
alpha_dump_irq(int irq)
{
	ulong action;
	ulong value;
	char *arglist[MAXARGS];
	int argc, others;
	char buf[BUFSIZE];

	/* newer kernels have irq_desc[]; switch permanently to the
	   generic handler */
	if (symbol_exists("irq_desc")) {
		machdep->dump_irq = generic_dump_irq;
		return(generic_dump_irq(irq));
	}

	action = symbol_value("irq_action") + (sizeof(void *) * irq);

	readmem(action, KVADDR, &action,
		sizeof(void *), "irq_action pointer", FAULT_ON_ERROR);

	if (!action) {
		/* unregistered IRQ: emit the empty template */
		fprintf(fp, " IRQ: %d\n", irq);
		fprintf(fp, "handler:\n");
		fprintf(fp, " flags: \n");
		fprintf(fp, " mask: \n");
		fprintf(fp, " name: \n");
		fprintf(fp, " dev_id: \n");
		fprintf(fp, " next: \n\n");
		return;
	}

	fprintf(fp, " IRQ: %d\n", irq);

	open_tmpfile();

do_linked_action:

	dump_struct("irqaction", action, RADIX(16));
	action = 0;

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		strip_comma(buf);
		argc = parse_line(buf, arglist);

		if (STREQ(arglist[0], "struct") || STREQ(buf, "};"))
			continue;

		if (STREQ(arglist[0], "handler")) {
			fprintf(pc->saved_fp, "handler: %s ",
				strip_hex(arglist[2]));
			if (argc == 4)
				fprintf(pc->saved_fp, "%s", arglist[3]);
			fprintf(pc->saved_fp, "\n");
		}

		if (STREQ(arglist[0], "flags")) {
			value = htol(strip_comma(arglist[2]),
				FAULT_ON_ERROR, NULL);
			fprintf(pc->saved_fp, " flags: %lx ", value);
			if (value) {
				others = 0;
				fprintf(pc->saved_fp, "(");
				if (value & SA_INTERRUPT)
					fprintf(pc->saved_fp,
						"%sSA_INTERRUPT",
						others++ ? "|" : "");
				if (value & SA_PROBE)
					fprintf(pc->saved_fp,
						"%sSA_PROBE",
						others++ ? "|" : "");
				if (value & SA_SAMPLE_RANDOM)
					fprintf(pc->saved_fp,
						"%sSA_SAMPLE_RANDOM",
						others++ ? "|" : "");
				if (value & SA_SHIRQ)
					fprintf(pc->saved_fp,
						"%sSA_SHIRQ",
						others++ ? "|" : "");
				fprintf(pc->saved_fp, ")");
				if (value & ~ACTION_FLAGS) {
					fprintf(pc->saved_fp,
					    " (bits %lx not translated)",
						value & ~ACTION_FLAGS);
				}
			}
			fprintf(pc->saved_fp, "\n");
		}

		if (STREQ(arglist[0], "mask")) {
			value = htol(strip_comma(arglist[2]),
				FAULT_ON_ERROR, NULL);
			fprintf(pc->saved_fp, " mask: %lx\n", value);
		}

		if (STREQ(arglist[0], "name")) {
			fprintf(pc->saved_fp, " name: %s ",
				strip_hex(arglist[2]));
			if (argc == 4)
				fprintf(pc->saved_fp, "\"%s\"",
					arglist[3]);
			fprintf(pc->saved_fp, "\n");
		}

		if (STREQ(arglist[0], "dev_id")) {
			value = htol(strip_comma(arglist[2]),
				FAULT_ON_ERROR, NULL);
			fprintf(pc->saved_fp, " dev_id: %lx\n", value);
		}

		if (STREQ(arglist[0], "next")) {
			value = htol(strip_comma(arglist[2]),
				FAULT_ON_ERROR, NULL);
			fprintf(pc->saved_fp, " next: %s\n",
				strip_hex(arglist[2]));
			/* a non-NULL next means another chained action */
			if (value)
				action = value;
		}
	}
	close_tmpfile();

	fprintf(fp, "\n");

	/* NOTE(review): the loop re-enters after close_tmpfile() --
	   presumably open/close_tmpfile() nest/restore correctly in this
	   codebase; confirm against the tmpfile helpers. */
	if (action)
		goto do_linked_action;
}

/*
 *  Get a stack frame combination of pc and ra from the most relevent spot.
 */
static void
alpha_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct syment *sp;
	ulong ksp;
	ulong ip;

	if (pcp) {
		if (DUMPFILE() && is_panic_thread(bt->task)) {
			sp = next_symbol("crash_save_current_state", NULL);
			if (HWRESET_TASK(bt->task))
				ip = get_percpu_data(0, GET_HALT_PC, 0);
			else if (sp)
				/* just below the end of the save routine */
				ip = sp->value - 4;
			else
				ip = symbol_value(
					"crash_save_current_state") + 16;
		} else
			get_alpha_frame(bt, &ip, NULL);
		*pcp = ip;
	}

	if (spp) {
		ip = 0;
		if (!get_panic_ksp(bt, &ksp))
			get_alpha_frame(bt,
				HWRESET_TASK(bt->task) ? &ip : NULL, &ksp);
		if (!INSTACK(ksp, bt))
			error(FATAL,
			    "cannot determine starting stack address\n",
				bt->task);
		*spp = ksp;
		/* a HWRESET scan may also have produced a better pc */
		if (ip)
			*pcp = ip;
	}
}

/*
 *  Do the work formerly done by alpha_get_sp() and alpha_get_pc().
 */
static void
get_alpha_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	int i;
	ulong ip;
	ulong r26;
	ulong ksp, sp;
	ulong *spp;
	ulong percpu_ra;
	ulong percpu_pv;
	struct percpu_data percpu_data;
	char buf[BUFSIZE];
	ulong task;
	ulong *stack;

	task = bt->task;
	stack = (ulong *)bt->stackbuf;

	/* locate the saved kernel stack pointer, whose home moved across
	   kernel versions */
	if (tt->flags & THREAD_INFO) {
		/* pcb.ksp is 1st word in thread_info */
		readmem(bt->tc->thread_info, KVADDR, &ksp, sizeof(ulong),
			"thread_info pcb ksp", FAULT_ON_ERROR);
		sp = ksp;
	} else if (VALID_MEMBER(task_struct_tss_ksp))
		ksp = sp = stack[OFFSET(task_struct_tss_ksp)/sizeof(long)];
	else
		ksp = sp =
		    stack[OFFSET(task_struct_thread_ksp)/sizeof(long)];

	ip = 0;
	percpu_ra = percpu_pv = 0;
	spp = &stack[(sp - task)/sizeof(long)];

	if (DUMPFILE() && getsp) {
		if (HWRESET_TASK(task)) {
			if (INSTACK(sp, bt)) {
				*getsp = sp;
				return;
			} else {
				/* ksp is bogus after a hardware reset;
				   fall back to the HWRPB halt values and
				   scan the whole stack for them */
				get_percpu_data(0, 0, &percpu_data);
				percpu_ra = percpu_data.halt_ra;
				percpu_pv = percpu_data.halt_pv;
				spp = &stack[roundup(SIZE(task_struct),
					sizeof(ulong)) / sizeof(ulong)];
			}
		}

		if (!percpu_ra &&
		    (STREQ(closest_symbol(*spp), "panic") ||
		    STREQ(closest_symbol(*spp), "handle_ipi"))) {
			*getsp = sp;
			return;
		}
	}

percpu_retry:

	if (CRASHDEBUG(1) && percpu_ra) {
		fprintf(fp, "get_alpha_frame: look for %lx (%s)\n",
			percpu_ra, value_to_symstr(percpu_ra, buf, 0));
	}

	for (i = 0, spp++; spp < &stack[LONGS_PER_STACK]; spp++,i++) {
		if (CRASHDEBUG(1) && (percpu_ra || percpu_pv) &&
		    is_kernel_text(*spp)) {
			fprintf(fp, "%lx: %lx (%s)\n",
				((ulong)spp - (ulong)stack) + task,
				*spp, value_to_symstr(*spp, buf, 0));
		}

		if (percpu_ra) {
			/* looking for the halt return address */
			if (*spp == percpu_ra) {
				*getsp = ((ulong)spp - (ulong)stack) + task;
				return;
			}
			continue;
		} else if (percpu_pv) {
			/* fallback: looking for the halt procedure value */
			if (*spp == percpu_pv) {
				*getsp = ((ulong)spp - (ulong)stack) + task;
				if (getpc)
					*getpc = percpu_pv;
				return;
			}
			continue;
		}

		/* normal case: a saved sp followed by kernel text */
		if (!INSTACK(*spp, bt))
			continue;
		if (is_kernel_text(*(spp+1))) {
			sp = *spp;
			ip = *(spp+1);
			break;
		}
	}

	if (percpu_ra) {
		/* halt_ra not on the stack -- retry with halt_pv */
		percpu_ra = 0;
		error(INFO,
		    "cannot find return address (percpu_ra) in HARDWARE RESET stack\n");
		error(INFO,
		    "looking for procedure address (percpu_pv) in HARDWARE RESET stack\n");
		if (CRASHDEBUG(1)) {
			fprintf(fp, "get_alpha_frame: look for %lx (%s)\n",
				percpu_pv,
				value_to_symstr(percpu_pv, buf, 0));
		}
		spp = &stack[roundup(SIZE(task_struct),
			sizeof(ulong)) / sizeof(ulong)];
		goto percpu_retry;
	}

	if (percpu_pv) {
		error(INFO,
		    "cannot find procedure address (percpu_pv) in HARDWARE RESET stack\n");
	}

	/*
	 * Check for a forked task that has not yet run in user space.
	 */
	if (!ip) {
		if (INSTACK(ksp + OFFSET(switch_stack_r26), bt)) {
			readmem(ksp + OFFSET(switch_stack_r26), KVADDR,
				&r26, sizeof(ulong),
				"ret_from_smp_fork check", FAULT_ON_ERROR);
			if (STREQ(closest_symbol(r26),
			    "ret_from_smp_fork") ||
			    STREQ(closest_symbol(r26),
			    "ret_from_smpfork")) {
				ip = r26;
				sp = ksp;
			}
		}
	}

	if (getsp)
		*getsp = sp;
	if (getpc)
		*getpc = ip;
}

/*
 *  Fill the percpu_data structure with information from the
 *  hwrpb/percpu_data structures for a given CPU.  If requested,
 *  return one of the specified entries.
 */
*/ static ulong get_percpu_data(int cpu, ulong flag, struct percpu_data *pd) { ulong hwrpb, halt_ra, halt_PC, halt_pv; unsigned long processor_offset, processor_size; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR, &processor_offset, sizeof(ulong), "hwrpb processor_offset", FAULT_ON_ERROR); readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR, &processor_size, sizeof(ulong), "hwrpb processor_size", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_PC), KVADDR, &halt_PC, sizeof(ulong), "percpu halt_PC", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_ra), KVADDR, &halt_ra, sizeof(ulong), "percpu halt_ra", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_pv), KVADDR, &halt_pv, sizeof(ulong), "percpu halt_pv", FAULT_ON_ERROR); if (pd) { pd->halt_PC = halt_PC; pd->halt_ra = halt_ra; pd->halt_pv = halt_pv; } switch (flag) { case GET_HALT_PC: return halt_PC; case GET_HALT_RA: return halt_ra; case GET_HALT_PV: return halt_pv; default: return 0; } } /* * Translate a PTE, returning TRUE if the page is _PAGE_VALID or _PAGE_PRESENT, * whichever is appropriate for the machine type. If a physaddr pointer is * passed in, don't print anything. 
 */
static int
alpha_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	int c, len1, len2, len3, others, page_present;
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	physaddr_t paddr;

	/* Alpha PTEs keep the PFN in the upper 32 bits. */
	paddr = PTOB(pte >> 32);
	page_present = (pte & _PAGE_VALID);

	/* Quiet mode: just hand back the physical address. */
	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE"));

	/* Non-present but non-zero PTE: display its swap location instead. */
	if (!page_present && pte) {
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	sprintf(physbuf, "%llx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));

	/* Decode each flag bit into a "|"-separated name list. */
	fprintf(fp, "(");
	others = 0;

	if (pte) {
		if (pte & _PAGE_VALID)
			fprintf(fp, "%sVALID", others++ ? "|" : "");
		if (pte & _PAGE_FOR)
			fprintf(fp, "%sFOR", others++ ? "|" : "");
		if (pte & _PAGE_FOW)
			fprintf(fp, "%sFOW", others++ ? "|" : "");
		if (pte & _PAGE_FOE)
			fprintf(fp, "%sFOE", others++ ? "|" : "");
		if (pte & _PAGE_ASM)
			fprintf(fp, "%sASM", others++ ? "|" : "");
		if (pte & _PAGE_KRE)
			fprintf(fp, "%sKRE", others++ ? "|" : "");
		if (pte & _PAGE_URE)
			fprintf(fp, "%sURE", others++ ? "|" : "");
		if (pte & _PAGE_KWE)
			fprintf(fp, "%sKWE", others++ ? "|" : "");
		if (pte & _PAGE_UWE)
			fprintf(fp, "%sUWE", others++ ? "|" : "");
		if (pte & _PAGE_DIRTY)
			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
		if (pte & _PAGE_ACCESSED)
			fprintf(fp, "%sACCESSED", others++ ? "|" : "");
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return page_present;
}

/*
 * This is currently not machine-dependent, but eventually I'd prefer to use
 * the HWPCB for the real physical memory size.
 */
static uint64_t
alpha_memory_size(void)
{
	return (generic_memory_size());
}

/*
 * Determine where vmalloc'd memory starts.
 */
static ulong
alpha_vmalloc_start(void)
{
	return VMALLOC_START;
}

/*
 * ALPHA tasks are all stacksize-aligned.
 */
static int
alpha_is_task_addr(ulong task)
{
	return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
}

/*
 * Keep or reject a symbol from the kernel namelist.
 * Returns TRUE only for named symbols above MIN_SYMBOL_VALUE.
 */
int
alpha_verify_symbol(const char *name, ulong value, char type)
{
	if (CRASHDEBUG(8) && name && strlen(name))
		fprintf(fp, "%016lx %s\n", value, name);

	return (name && strlen(name) && (value > MIN_SYMBOL_VALUE));
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
alpha_get_smp_cpus(void)
{
	int cpus;

	/* Prefer the live online-cpu count; fall back to the kernel table. */
	if ((cpus = get_cpus_online()))
		return cpus;
	else
		return kt->cpus;
}

/*
 * Machine dependent command: "mach [-c] [-x|-d]".
 *  -c dumps the hwrpb/percpu structures; -x/-d select output radix.
 */
void
alpha_cmd_mach(void)
{
	int c, cflag;
	unsigned int radix;

	cflag = radix = 0;

	while ((c = getopt(argcnt, args, "cxd")) != EOF) {
		switch(c)
		{
		case 'c':
			cflag++;
			break;

		case 'x':
			if (radix == 10)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 16;
			break;

		case 'd':
			if (radix == 16)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 10;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (cflag)
		display_hwrpb(radix);
	else
		alpha_display_machine_stats();
}

/*
 * "mach" command output.
 */
static void
alpha_display_machine_stats(void)
{
	struct new_utsname *uts;
	char buf[BUFSIZE];
	ulong mhz;

	uts = &kt->utsname;

	fprintf(fp, " MACHINE TYPE: %s\n", uts->machine);
	fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf));
	fprintf(fp, " CPUS: %d\n", kt->cpus);
	fprintf(fp, " PROCESSOR SPEED: ");
	if ((mhz = machdep->processor_speed()))
		fprintf(fp, "%ld Mhz\n", mhz);
	else
		fprintf(fp, "(unknown)\n");
	fprintf(fp, " HZ: %d\n", machdep->hz);
	fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
	fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size());
	fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
	fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
	fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE());
}

/*
 * Display the hwrpb_struct and each percpu_struct.
 * The radix argument is passed through to dump_struct() for member output.
 */
static void
display_hwrpb(unsigned int radix)
{
	int cpu;
	ulong hwrpb, percpu;
	ulong processor_offset, processor_size;

	get_symbol_data("hwrpb", sizeof(void *), &hwrpb);

	readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR,
		&processor_offset, sizeof(ulong),
		"hwrpb processor_offset", FAULT_ON_ERROR);

	readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR,
		&processor_size, sizeof(ulong),
		"hwrpb processor_size", FAULT_ON_ERROR);

	fprintf(fp, "HWRPB:\n");
	dump_struct("hwrpb_struct", hwrpb, radix);

	/* One percpu_struct per CPU, laid out consecutively after the HWRPB. */
	for (cpu = 0; cpu < kt->cpus; cpu++) {
		fprintf(fp, "\nCPU %d:\n", cpu);
		percpu = hwrpb + processor_offset + (processor_size * cpu);
		dump_struct("percpu_struct", percpu, radix);
	}
}

/*
 * Perform any leftover pre-prompt machine-specific initialization tasks here.
 */
static void
alpha_post_init(void)
{
	/*
	 * Adjust the default signal-name table to alpha's numbering.
	 * NOTE(review): assumes modify_signame(num, name, alt) replaces the
	 * entry for signal "num" -- confirm against the generic signame table.
	 */
	modify_signame(7, "SIGEMT", NULL);
	modify_signame(10, "SIGBUS", NULL);
	modify_signame(12, "SIGSYS", NULL);
	modify_signame(16, "SIGURG", NULL);
	modify_signame(17, "SIGSTOP", NULL);
	modify_signame(18, "SIGTSTP", NULL);
	modify_signame(19, "SIGCONT", NULL);
	modify_signame(20, "SIGCHLD", NULL);
	modify_signame(23, "SIGIO", "SIGPOLL");
	modify_signame(29, "SIGINFO", "SIGPWR");
	modify_signame(30, "SIGUSR1", NULL);
	modify_signame(31, "SIGUSR2", NULL);
}

#endif /* ALPHA */
crash-7.1.4/main.c0000775000000000000000000016766412634305150012431 0ustar  rootroot
/* main.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2015 David Anderson
 * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "defs.h"
#include "xen_hyper_defs.h"
/* NOTE(review): the following system header names appear to have been
 * stripped during text extraction -- confirm against the original source. */
#include 
#include 
#include 

/* Forward declarations for file-local helpers defined below. */
static void setup_environment(int, char **);
static int is_external_command(void);
static int is_builtin_command(void);
static int is_input_file(void);
static void check_xen_hyper(void);
static void show_untrusted_files(void);
static void get_osrelease(char *);
static void get_log(char *);
static char *no_vmcoreinfo(const char *);

/*
 * getopt_long() option table.  Entries whose "val" field is 0 are matched
 * by name in main()'s "case 0" handler below.
 */
static struct option long_options[] = {
        {"memory_module", required_argument, 0, 0},
        {"memory_device", required_argument, 0, 0},
        {"no_kallsyms", 0, 0, 0},
        {"no_modules", 0, 0, 0},
        {"help", optional_argument, 0, 'h'},
        {"no_data_debug", 0, 0, 0},
        {"no_crashrc", 0, 0, 0},
        {"no_kmem_cache", 0, 0, 0},
        {"kmem_cache_delay", 0, 0, 0},
        {"readnow", 0, 0, 0},
        {"smp", 0, 0, 0},
        {"machdep", required_argument, 0, 0},
        {"version", 0, 0, 0},
        {"buildinfo", 0, 0, 0},
        {"cpus", required_argument, 0, 0},
        {"no_ikconfig", 0, 0, 0},
        {"hyper", 0, 0, 0},
        {"p2m_mfn", required_argument, 0, 0},
        {"xen_phys_start", required_argument, 0, 0},
        {"zero_excluded", 0, 0, 0},
        {"no_panic", 0, 0, 0},
        {"more", 0, 0, 0},
        {"less", 0, 0, 0},
        {"CRASHPAGER", 0, 0, 0},
        {"no_scroll", 0, 0, 0},
        {"reloc", required_argument, 0, 0},
        {"kaslr", required_argument, 0, 0},
        {"active", 0, 0, 0},
        {"minimal", 0, 0, 0},
        {"mod", required_argument, 0, 0},
        {"kvmhost", required_argument, 0, 0},
        {"kvmio", required_argument, 0, 0},
        {"no_elf_notes", 0, 0, 0},
        {"osrelease", required_argument, 0, 0},
        {"log", required_argument, 0, 0},
        {"hex", 0, 0, 0},
        {"dec", 0, 0, 0},
        {"no_strip", 0, 0, 0},
        {"hash", required_argument, 0, 0},
        {"offline", required_argument, 0, 0},
        {"src", required_argument, 0, 0},
        {0, 0, 0, 0}
};

/*
 * Entry point: parse command-line options, classify the kernel namelist
 * and dumpfile/memory-source arguments, initialize the subsystems, and
 * hand control to gdb, which calls back into main_loop() below.
 */
int
main(int argc, char **argv)
{
	int i, c, option_index;
	char *tmpname;

	setup_environment(argc, argv);

	/*
	 * Get and verify command line options.
	 */
	opterr = 0;
	optind = 0;
	while((c = getopt_long(argc, argv, "Lkgh::e:i:sSvc:d:tfp:m:xo:",
		long_options, &option_index)) != -1) {
		switch (c)
		{
		case 0:
			/* Long options with val == 0: dispatch by name. */
			if (STREQ(long_options[option_index].name,
			    "memory_module"))
				pc->memory_module = optarg;

			else if (STREQ(long_options[option_index].name,
			    "memory_device"))
				pc->memory_device = optarg;

			else if (STREQ(long_options[option_index].name,
			    "no_kallsyms"))
				kt->flags |= NO_KALLSYMS;

			else if (STREQ(long_options[option_index].name,
			    "no_modules"))
				kt->flags |= NO_MODULE_ACCESS;

			else if (STREQ(long_options[option_index].name,
			    "no_ikconfig"))
				kt->flags |= NO_IKCONFIG;

			else if (STREQ(long_options[option_index].name,
			    "no_data_debug"))
				pc->flags &= ~DATADEBUG;

			else if (STREQ(long_options[option_index].name,
			    "no_kmem_cache"))
				vt->flags |= KMEM_CACHE_UNAVAIL;

			else if (STREQ(long_options[option_index].name,
			    "kmem_cache_delay"))
				vt->flags |= KMEM_CACHE_DELAY;

			else if (STREQ(long_options[option_index].name,
			    "readnow"))
				pc->flags |= READNOW;

			else if (STREQ(long_options[option_index].name,
			    "smp"))
				kt->flags |= SMP;

			else if (STREQ(long_options[option_index].name,
			    "machdep")) {
				/* Stash in the first free cmdline_args slot. */
				for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
					if (machdep->cmdline_args[i])
						continue;
					machdep->cmdline_args[i] = optarg;
					break;
				}
				if (i == MAX_MACHDEP_ARGS)
					error(INFO, "option ignored: %s\n",
						optarg);
			}

			else if (STREQ(long_options[option_index].name,
			    "version")) {
				pc->flags |= VERSION_QUERY;
				display_version();
				display_gdb_banner();
				clean_exit(0);
			}

			else if (STREQ(long_options[option_index].name,
			    "buildinfo")) {
				dump_build_data();
				clean_exit(0);
			}

			else if (STREQ(long_options[option_index].name,
			    "cpus"))
				kt->cpus_override = optarg;

			else if (STREQ(long_options[option_index].name,
			    "hyper"))
				pc->flags |= XEN_HYPER;

			else if (STREQ(long_options[option_index].name,
			    "p2m_mfn"))
				xen_kdump_p2m_mfn(optarg);

			else if (STREQ(long_options[option_index].name,
			    "xen_phys_start"))
				set_xen_phys_start(optarg);

			else if (STREQ(long_options[option_index].name,
			    "zero_excluded"))
				*diskdump_flags |= ZERO_EXCLUDED;

			else if (STREQ(long_options[option_index].name,
			    "no_elf_notes")) {
				if (machine_type("X86") ||
				    machine_type("X86_64"))
					*diskdump_flags |= NO_ELF_NOTES;
				else
					error(INFO,
					    "--no_elf_notes is only applicable to "
					    "the X86 and X86_64 architectures.\n");
			}

			else if (STREQ(long_options[option_index].name,
			    "no_panic"))
				tt->flags |= PANIC_TASK_NOT_FOUND;

			else if (STREQ(long_options[option_index].name,
			    "no_strip"))
				st->flags |= NO_STRIP;

			else if (STREQ(long_options[option_index].name,
			    "more")) {
				if ((pc->scroll_command != SCROLL_NONE) &&
				    file_exists("/bin/more", NULL))
					pc->scroll_command = SCROLL_MORE;
			}

			else if (STREQ(long_options[option_index].name,
			    "less")) {
				if ((pc->scroll_command != SCROLL_NONE) &&
				    file_exists("/usr/bin/less", NULL))
					pc->scroll_command = SCROLL_LESS;
			}

			else if (STREQ(long_options[option_index].name,
			    "CRASHPAGER")) {
				if ((pc->scroll_command != SCROLL_NONE) &&
				    CRASHPAGER_valid())
					pc->scroll_command = SCROLL_CRASHPAGER;
			}

			else if (STREQ(long_options[option_index].name,
			    "no_scroll"))
				pc->flags &= ~SCROLL;

			else if (STREQ(long_options[option_index].name,
			    "no_crashrc"))
				pc->flags |= NOCRASHRC;

			else if (STREQ(long_options[option_index].name,
			    "active"))
				tt->flags |= ACTIVE_ONLY;

			else if (STREQ(long_options[option_index].name,
			    "mod"))
				kt->module_tree = optarg;

			else if (STREQ(long_options[option_index].name,
			    "hash")) {
				if (!calculate(optarg, &pc->nr_hash_queues,
				    NULL, 0)) {
					error(INFO,
					    "invalid --hash argument: %s\n",
						optarg);
				}
			}

			else if (STREQ(long_options[option_index].name,
			    "kaslr")) {
				if (!machine_type("X86_64"))
					error(INFO, "--kaslr only valid "
						"with X86_64 machine type.\n");
				else if (STREQ(optarg, "auto"))
					kt->flags2 |= (RELOC_AUTO|KASLR);
				else {
					if (!calculate(optarg, &kt->relocate,
					    NULL, 0)) {
						error(INFO,
						    "invalid --kaslr argument: %s\n",
							optarg);
						program_usage(SHORT_FORM);
					}
					/* KASLR offset is stored negated. */
					kt->relocate *= -1;
					kt->flags |= RELOC_SET;
					kt->flags2 |= KASLR;
				}
			}

			else if (STREQ(long_options[option_index].name,
			    "reloc")) {
				if (!calculate(optarg, &kt->relocate,
				    NULL, 0)) {
					error(INFO,
					    "invalid --reloc argument: %s\n",
						optarg);
					program_usage(SHORT_FORM);
				}
				kt->flags |= RELOC_SET;
			}

			else if (STREQ(long_options[option_index].name,
			    "minimal"))
				pc->flags |= MINIMAL_MODE;

			else if (STREQ(long_options[option_index].name,
			    "kvmhost"))
				set_kvmhost_type(optarg);

			else if (STREQ(long_options[option_index].name,
			    "kvmio"))
				set_kvm_iohole(optarg);

			else if (STREQ(long_options[option_index].name,
			    "osrelease")) {
				pc->flags2 |= GET_OSRELEASE;
				get_osrelease(optarg);
			}

			else if (STREQ(long_options[option_index].name,
			    "log")) {
				pc->flags2 |= GET_LOG;
				get_log(optarg);
			}

			else if (STREQ(long_options[option_index].name,
			    "hex")) {
				pc->flags2 |= RADIX_OVERRIDE;
				pc->output_radix = 16;
			}

			else if (STREQ(long_options[option_index].name,
			    "dec")) {
				pc->flags2 |= RADIX_OVERRIDE;
				pc->output_radix = 10;
			}

			else if (STREQ(long_options[option_index].name,
			    "offline")) {
				if (STREQ(optarg, "show"))
					pc->flags2 &= ~OFFLINE_HIDE;
				else if (STREQ(optarg, "hide"))
					pc->flags2 |= OFFLINE_HIDE;
				else {
					error(INFO,
					    "invalid --offline argument: %s\n",
						optarg);
					program_usage(SHORT_FORM);
				}
			}

			else if (STREQ(long_options[option_index].name,
			    "src"))
				kt->source_tree = optarg;

			else {
				error(INFO, "internal error: option %s unhandled\n",
					long_options[option_index].name);
				program_usage(SHORT_FORM);
			}
			break;

		case 'f':
			st->flags |= FORCE_DEBUGINFO;
			break;

		case 'g':
			pc->flags |= KERNEL_DEBUG_QUERY;
			break;

		case 'h':
			/* note: long_getopt's handling of optional arguments is weak.
			 * To it, an optional argument must be part of the same argument
			 * as the flag itself (eg. --help=commands or -hcommands).
			 * We want to accept "--help commands" or "-h commands".
			 * So we must do that part ourselves.
			 */
			if (optarg != NULL)
				cmd_usage(optarg,
				    COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
			else if (argv[optind] != NULL &&
			    argv[optind][0] != '-')
				cmd_usage(argv[optind++],
				    COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
			else
				program_usage(LONG_FORM);
			clean_exit(0);

		case 'k':
			pc->flags |= KERNTYPES;
			break;

		case 'e':
			if (STREQ(optarg, "vi"))
				pc->editing_mode = "vi";
			else if (STREQ(optarg, "emacs"))
				pc->editing_mode = "emacs";
			else
				fprintf(fp, "invalid edit mode: %s\n", optarg);
			break;

		case 't':
			kt->flags2 |= GET_TIMESTAMP;
			break;

		case 'i':
			pc->input_file = optarg;
			pc->flags |= CMDLINE_IFILE;
			break;

		case 'v':
			pc->flags |= VERSION_QUERY;
			display_version();
			display_gdb_banner();
			clean_exit(0);

		case 's':
			pc->flags |= SILENT;
			pc->flags &= ~SCROLL;
//			pc->scroll_command = SCROLL_NONE;  (why?)
			break;

		case 'L':
			if (mlockall(MCL_CURRENT|MCL_FUTURE) == -1)
				perror("mlockall");
			break;

		case 'S':
			if (is_system_map("/boot/System.map")) {
				pc->system_map = "/boot/System.map";
				pc->flags |= (SYSMAP|SYSMAP_ARG);
			}
			break;

		case 'c':
			create_console_device(optarg);
			break;

		case 'd':
			pc->debug = atol(optarg);
			set_lkcd_debug(pc->debug);
			set_vas_debug(pc->debug);
			break;

		case 'p':
			force_page_size(optarg);
			break;

		case 'm':
			/* Same slot-filling scheme as --machdep above. */
			for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
				if (machdep->cmdline_args[i])
					continue;
				machdep->cmdline_args[i] = optarg;
				break;
			}
			if (i == MAX_MACHDEP_ARGS)
				error(INFO, "option ignored: %s\n", optarg);
			break;

		case 'x':
			pc->flags |= PRELOAD_EXTENSIONS;
			break;

		case 'o':
			ramdump_elf_output_file(optarg);
			break;

		default:
			error(INFO, "invalid option: %s\n",
				argv[optind-1]);
			program_usage(SHORT_FORM);
		}
	}
	opterr = 1;

	display_version();

	/*
	 * Take the kernel and dumpfile arguments in either order.
	 */
	while (argv[optind]) {

		if (is_ramdump(argv[optind])) {
			if (pc->flags & MEMORY_SOURCES) {
				error(INFO, "too many dumpfile arguments\n");
				program_usage(SHORT_FORM);
			}
			pc->dumpfile = ramdump_to_elf();
			if (is_kdump(pc->dumpfile, KDUMP_LOCAL)) {
				pc->flags |= KDUMP;
				if (is_ramdump_image())
					pc->readmem = read_ramdump;
				else
					pc->readmem = read_kdump;
				pc->writemem = NULL;
			} else {
				error(INFO, "malformed ELF file: %s\n",
					pc->dumpfile);
				program_usage(SHORT_FORM);
			}
			optind++;
			continue;
		}

		if (is_remote_daemon(argv[optind])) {
			if (pc->flags & DUMPFILE_TYPES) {
				error(INFO,
				    "too many dumpfile/memory arguments\n");
				program_usage(SHORT_FORM);
			}
			pc->flags2 |= REMOTE_DAEMON;
			optind++;
			continue;
		}

		if (STREQ(argv[optind], "/dev/crash")) {
			pc->memory_device = argv[optind];
			optind++;
			continue;
		}

		if (!file_exists(argv[optind], NULL)) {
			error(INFO, "%s: %s\n", argv[optind],
				strerror(ENOENT));
			program_usage(SHORT_FORM);
		} else if (is_directory(argv[optind])) {
			error(INFO, "%s: not a supported file format\n",
				argv[optind]);
			program_usage(SHORT_FORM);
		} else if (!is_readable(argv[optind]))
			program_usage(SHORT_FORM);

		if (is_kernel(argv[optind])) {
			if (pc->namelist || pc->server_namelist) {
				if (!select_namelist(argv[optind])) {
					error(INFO,
					    "too many namelist arguments\n");
					program_usage(SHORT_FORM);
				}
			} else
				pc->namelist = argv[optind];

		} else if (is_compressed_kernel(argv[optind], &tmpname)) {
			/* Keep both the uncompressed temp file and the
			 * original argument name. */
			if (pc->namelist) {
				if (!select_namelist(tmpname)) {
					error(INFO,
					    "too many namelist arguments\n");
					program_usage(SHORT_FORM);
				}
				if (pc->namelist_debug == tmpname) {
					pc->namelist_debug_orig = argv[optind];
				} else {
					pc->namelist_debug_orig =
						pc->namelist_orig;
					pc->namelist_orig = argv[optind];
				}
			} else {
				pc->namelist = tmpname;
				pc->namelist_orig = argv[optind];
			}
			pc->cleanup = NULL;

		} else if (!(pc->flags & KERNEL_DEBUG_QUERY)) {

			/* Probe the argument against each known
			 * dumpfile/memory-source format in turn. */
			if (is_flattened_format(argv[optind]))
				pc->flags2 |= FLAT;

			if (STREQ(argv[optind], "/dev/mem")) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= DEVMEM;
				pc->dumpfile = NULL;
				pc->readmem = read_dev_mem;
				pc->writemem = write_dev_mem;
				pc->live_memsrc = argv[optind];

			} else if (is_proc_kcore(argv[optind], KCORE_LOCAL)) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= PROC_KCORE;
				pc->dumpfile = NULL;
				pc->readmem = read_proc_kcore;
				pc->writemem = write_proc_kcore;
				pc->live_memsrc = argv[optind];

			} else if (is_netdump(argv[optind], NETDUMP_LOCAL)) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= NETDUMP;
				pc->dumpfile = argv[optind];

				if (is_sadump_xen()) {
					pc->readmem = read_kdump;
					pc->writemem = write_kdump;
				} else {
					pc->readmem = read_netdump;
					pc->writemem = write_netdump;
				}

			} else if (is_kdump(argv[optind], KDUMP_LOCAL)) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= KDUMP;
				pc->dumpfile = argv[optind];
				pc->readmem = read_kdump;
				pc->writemem = write_kdump;

			} else if (is_kvmdump(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= KVMDUMP;
				pc->dumpfile = argv[optind];
				pc->readmem = read_kvmdump;
				pc->writemem = write_kvmdump;

			} else if (is_kvmdump_mapfile(argv[optind])) {
				if (pc->kvmdump_mapfile) {
					error(INFO,
					    "too many KVM map file arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->kvmdump_mapfile = argv[optind];

			} else if (is_xendump(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= XENDUMP;
				pc->dumpfile = argv[optind];
				pc->readmem = read_xendump;
				pc->writemem = write_xendump;

			} else if (is_system_map(argv[optind])) {
				pc->system_map = argv[optind];
				pc->flags |= (SYSMAP|SYSMAP_ARG);

			} else if (is_diskdump(argv[optind])) {
				/* Split diskdumps may legitimately span
				 * multiple file arguments. */
				if ((pc->flags & MEMORY_SOURCES) &&
				    (!dumpfile_is_split())) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= DISKDUMP;
				pc->dumpfile = argv[optind];
				pc->readmem = read_diskdump;
				pc->writemem = write_diskdump;

			} else if (is_lkcd_compressed_dump(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= LKCD;
				pc->dumpfile = argv[optind];
				pc->readmem = read_lkcd_dumpfile;
				pc->writemem = write_lkcd_dumpfile;

			} else if (is_mclx_compressed_dump(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= MCLXCD;
				pc->dumpfile = argv[optind];
				pc->readmem = read_mclx_dumpfile;
				pc->writemem = write_mclx_dumpfile;

			} else if (is_s390_dump(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= S390D;
				pc->dumpfile = argv[optind];
				pc->readmem = read_s390_dumpfile;
				pc->writemem = write_s390_dumpfile;

			} else if (is_sadump(argv[optind])) {
				/* sadump disksets may also span multiple
				 * file arguments. */
				if ((pc->flags & MEMORY_SOURCES) &&
				    !sadump_is_diskset()) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= SADUMP;
				pc->dumpfile = argv[optind];
				pc->readmem = read_sadump;
				pc->writemem = write_sadump;

			} else if (is_vmware_vmss(argv[optind])) {
				if (pc->flags & MEMORY_SOURCES) {
					error(INFO,
					    "too many dumpfile arguments\n");
					program_usage(SHORT_FORM);
				}
				pc->flags |= VMWARE_VMSS;
				pc->dumpfile = argv[optind];
				pc->readmem = read_vmware_vmss;
				pc->writemem = write_vmware_vmss;

			} else {
				error(INFO,
				    "%s: not a supported file format\n",
					argv[optind]);
				program_usage(SHORT_FORM);
			}
		}
		optind++;
	}

	check_xen_hyper();

	if (setjmp(pc->main_loop_env))
		clean_exit(1);

	/*
	 * Initialize various subsystems.
	 */
	fd_init();
	buf_init();
	cmdline_init();
	mem_init();
	hq_init();
	machdep_init(PRE_SYMTAB);
	symtab_init();
	paravirt_init();
	machdep_init(PRE_GDB);
	datatype_init();

	/*
	 * gdb_main_loop() modifies "command_loop_hook" to point to the
	 * main_loop() function below, and then calls gdb's main() function.
	 * After gdb initializes itself, it calls back to main_loop().
	 */
	gdb_main_loop(argc, argv);

	clean_exit(0);
	exit(0);
}

/*
 * This routine is called from above, but also will be re-entered
 * as part of gdb's SIGINT handling. Since GDB_INIT and RUNTIME
 * will be set on re-entrancy, the initialization routines won't
 * be called. This can be avoided by always making gdb ignore SIGINT.
 */
void
main_loop(void)
{
	/* Up-front warnings about dumpfiles with erased or missing data. */
	if (pc->flags2 & ERASEINFO_DATA)
		error(WARNING, "\n%s:\n "
		    "Kernel data has been erased from this dumpfile. This may "
		    "cause\n the crash session to fail entirely, may "
		    "cause commands to fail,\n or may result in "
		    "unpredictable\n runtime behavior.\n",
			pc->dumpfile);

	if (pc->flags2 & INCOMPLETE_DUMP) {
		error(WARNING, "\n%s:\n "
		    "This dumpfile is incomplete. This may cause the crash session"
		    "\n to fail entirely, may cause commands to fail, or may"
		    " result in\n unpredictable runtime behavior.\n",
			pc->dumpfile);
		if (!(*diskdump_flags & ZERO_EXCLUDED))
			fprintf(fp,
			    " NOTE: This dumpfile may be analyzed with the --zero_excluded command\n"
			    " line option, in which case any read requests from missing pages\n"
			    " will return zero-filled memory.\n");
	}

	if (pc->flags2 & EXCLUDED_VMEMMAP)
		error(WARNING, "\n%s:\n "
		    "This dumpfile is incomplete because the page structures associated\n"
		    " with excluded pages may also be excluded. This may cause the crash\n"
		    " session to fail entirely, may cause commands to fail (most notably\n"
		    " the \"kmem\" command), or may result in unpredictable runtime behavior.\n",
			pc->dumpfile);

	if (!(pc->flags & GDB_INIT)) {
		/* First entry: bring up the session-wide subsystems. */
		gdb_session_init();
		show_untrusted_files();
		kdump_backup_region_init();

		if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
			machdep_init(POST_GDB);
			xen_hyper_init();
			machdep_init(POST_INIT);
#else
			error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
		} else if (!(pc->flags & MINIMAL_MODE)) {
			read_in_kernel_config(IKCFG_INIT);
			kernel_init();
			machdep_init(POST_GDB);
			vm_init();
			machdep_init(POST_VM);
			module_init();
			help_init();
			task_init();
			vfs_init();
			net_init();
			dev_init();
			machdep_init(POST_INIT);
		}
	} else
		/* Re-entry from gdb's SIGINT handling. */
		SIGACTION(SIGINT, restart, &pc->sigaction, NULL);

	/*
	 * Display system statistics and current context.
	 */
	if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) {
		if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
			xen_hyper_display_sys_stats();
			xen_hyper_show_vcpu_context(XEN_HYPER_VCPU_LAST_CONTEXT());
			fprintf(fp, "\n");
#else
			error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
		} else if (!(pc->flags & MINIMAL_MODE)) {
			display_sys_stats();
			show_context(CURRENT_CONTEXT());
			fprintf(fp, "\n");
		}
	}

	if (pc->flags & MINIMAL_MODE)
		error(NOTE,
		    "minimal mode commands: log, dis, rd, sym, eval, set, extend and exit\n\n");

	pc->flags |= RUNTIME;

	if (pc->flags & PRELOAD_EXTENSIONS)
		preload_extensions();

	/*
	 * Return here if a non-recoverable error occurs
	 * during command execution.
	 */
	if (setjmp(pc->main_loop_env)) {
		;
	}

	/*
	 * process_command_line() reads, parses and stores input command lines
	 * in the global args[] array. exec_command() figures out what to
	 * do with the parsed line.
	 */
	while (TRUE) {
		process_command_line();
		exec_command();
	}
}

/*
 * Most of the time args[0] simply contains the name string of a command
 * found in the global command_table[]. Special consideration is done for
 * dealing with input files, "known" external commands, and built-in commands.
 * If none of the above apply, the args[0] string is checked against the
 * known list of structure, union and typedef names, and if found, passed
 * on to cmd_struct(), cmd_union() or cmd_whatis().
 */
void
exec_command(void)
{
	struct command_table_entry *ct;
	struct args_input_file args_ifile;

	/* A leading backslash bypasses alias/modification handling. */
	if (args[0] && (args[0][0] == '\\') && args[0][1]) {
		shift_string_left(args[0], 1);
		shift_string_left(pc->orig_line, 1);
		pc->curcmd_flags |= NO_MODIFY;
	}

reattempt:
	if (!args[0])
		return;

	optind = argerrs = 0;

	if ((ct = get_command_table_entry(args[0]))) {
		if (ct->flags & REFRESH_TASK_TABLE) {
			if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
				xen_hyper_refresh_domain_context_space();
				xen_hyper_refresh_vcpu_context_space();
#else
				error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
			} else if (!(pc->flags & MINIMAL_MODE)) {
				tt->refresh_task_table();
				sort_context_array();
				sort_tgid_array();
			}
		}
		if (!STREQ(pc->curcmd, pc->program_name))
			pc->lastcmd = pc->curcmd;
		pc->curcmd = ct->name;
		pc->cmdgencur++;

		if (is_args_input_file(ct, &args_ifile))
			exec_args_input_file(ct, &args_ifile);
		else
			(*ct->func)();

		pc->lastcmd = pc->curcmd;
		pc->curcmd = pc->program_name;
		return;
	}

	/* Not a table command: try the fallback interpretations in order. */
	if (is_input_file())
		return;
	if (is_external_command())
		return;
	if (is_builtin_command())
		return;
	if (is_datatype_command())
		goto reattempt;
	if (STRNEQ(args[0], "#") || STRNEQ(args[0], "//"))
		return;
	if (!(pc->flags & MINIMAL_MODE) &&
	    is_gdb_command(TRUE, FAULT_ON_ERROR))
		goto reattempt;
	if (REMOTE() && remote_execute())
		return;

	pc->curcmd = pc->program_name;

	if (pc->flags & MINIMAL_MODE)
		error(INFO,
		    "%s: command not available in minimal mode\n"
		    "NOTE: minimal mode commands: log, dis, rd, sym, eval, set, extend and exit\n",
			args[0]);
	else
		error(INFO, "command not found: %s\n", args[0]);

	if (pc->curcmd_flags & REPEAT)
		pc->curcmd_flags &= ~REPEAT;
}

/*
 * Find the command_table structure associated with a command
name. */
struct command_table_entry *
get_command_table_entry(char *name)
{
	int i;
	struct command_table_entry *cp;
	struct extension_table *ext;

	/* In gdb command mode, only a "crash <cmd>" prefix selects a crash
	 * command; everything else is routed to the "gdb" entry. */
	if (pc->flags2 & GDB_CMD_MODE) {
		if (STREQ(name, "crash")) {
			if (argcnt == 1)
				error(FATAL,
				    "a crash command must follow "
				    "the \"crash\" directive\n");
			/* Shift args[] left to drop the "crash" directive;
			 * args[argcnt] (the NULL terminator) is copied too. */
			for (i = 1; i <= argcnt; i++)
				args[i-1] = args[i];
			argcnt--;
			name = args[0];
		} else
			name = "gdb";
	}

	/* Built-in command table first ... */
	for (cp = pc->cmd_table; cp->name; cp++) {
		if (STREQ(cp->name, name)) {
			/* In minimal mode, only MINIMAL-flagged commands. */
			if (!(pc->flags & MINIMAL_MODE) ||
			    (cp->flags & MINIMAL))
				return cp;
			else
				return NULL;
		}
	}

	/* ... then each loaded extension's command table. */
	for (ext = extension_table; ext; ext = ext->next) {
		for (cp = ext->command_table; cp->name; cp++) {
			if (STREQ(cp->name, name)) {
				if (!(pc->flags & MINIMAL_MODE) ||
				    (cp->flags & MINIMAL))
					return cp;
				else
					return NULL;
			}
		}
	}

	return NULL;
}

/*
 * "<" redirects the remainder of the line as an input file of commands.
 */
static int
is_input_file(void)
{
	if (STREQ(args[0], "<")) {
		exec_input_file();
		return TRUE;
	}

	return FALSE;
}

/*
 * Handle the undocumented "test" and "save" built-ins.
 */
static int
is_builtin_command(void)
{
	int i;
	struct remote_file remote_file, *rfp;

	/*
	 * cmd_test() is used strictly for debugging -- but not advertised
	 * in the help menu.
	 */
	if (STREQ(args[0], "test")) {
		pc->curcmd = "test";
		cmd_test();
		return TRUE;
	}

	if (STREQ(args[0], "save")) {
		pc->curcmd = "save";
		rfp = &remote_file;
		BZERO(rfp, sizeof(struct remote_file));
		rfp->flags |= REMOTE_VERBOSE;
		/* Fetch each named file from the remote daemon. */
		for (i = 1; i < argcnt; i++) {
			rfp->filename = args[i];
			get_remote_file(rfp);
		}
		return TRUE;
	}

	return FALSE;
}

/*
 * Pure laziness -- to avoid having to type the exclamation point at the
 * beginning of the line.
*/ static int is_external_command(void) { int i; char *cmd; char command[BUFSIZE]; cmd = args[0]; if (STREQ(cmd, "vi") || STREQ(cmd, "pwd") || STREQ(cmd, "grep") || STREQ(cmd, "cat") || STREQ(cmd, "more") || STREQ(cmd, "less") || STREQ(cmd, "echo") || STREQ(cmd, "ls")) { sprintf(command, "%s", cmd); for (i = 1; i < argcnt; i++) { strcat(command, " "); if (strstr(args[i], " ")) { strcat(command, "\""); strcat(command, args[i]); strcat(command, "\""); } else strcat(command, args[i]); } if (system(command) == -1) perror(command); return TRUE; } return FALSE; } void cmd_quit(void) { if (REMOTE()) remote_exit(); clean_exit(0); } void cmd_mach(void) { machdep->cmd_mach(); } static void setup_environment(int argc, char **argv) { int i; char *p1; char buf[BUFSIZE]; char homerc[BUFSIZE]; char localrc[BUFSIZE]; FILE *afp; char *program; program = argv[0]; /* * Program output typically goes via "fprintf(fp, ...)", but the * contents of fp are modified on the fly to handle redirection * to pipes or output files. */ fp = stdout; /* * Start populating the program_context structure. It's used so * frequently that "pc" has been declared globally to point to the * "program_context" structure. 
*/ pc->program_name = (char *)basename(program); pc->program_path = program; pc->program_version = build_version; pc->program_pid = (ulong)getpid(); pc->curcmd = pc->program_name; pc->flags = (HASH|SCROLL); pc->flags |= DATADEBUG; /* default until unnecessary */ pc->confd = -2; pc->machine_type = MACHINE_TYPE; pc->readmem = read_dev_mem; /* defaults until argv[] is parsed */ pc->writemem = write_dev_mem; pc->read_vmcoreinfo = no_vmcoreinfo; pc->memory_module = NULL; pc->memory_device = MEMORY_DRIVER_DEVICE; machdep->bits = sizeof(long) * 8; machdep->verify_paddr = generic_verify_paddr; machdep->get_kvaddr_ranges = generic_get_kvaddr_ranges; pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION; pc->cmdgencur = 0; pc->cmd_table = linux_command_table; kt->BUG_bytes = -1; kt->flags |= PRE_KERNEL_INIT; /* * Set up to perform a clean_exit() upon parent death. */ SIGACTION(SIGUSR2, restart, &pc->sigaction, NULL); prctl(PR_SET_PDEATHSIG, SIGUSR2); /* * Get gdb version before initializing it since this might be one * of the short-hand commands that need it without running gdb. */ get_gdb_version(); /* * Set up the default scrolling behavior for terminal output. 
*/ if (isatty(fileno(stdout))) { if (CRASHPAGER_valid()) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_CRASHPAGER; } else if (file_exists("/usr/bin/less", NULL)) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_LESS; } else if (file_exists("/bin/more", NULL)) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_MORE; } else { pc->scroll_command = SCROLL_NONE; pc->flags &= ~SCROLL; } } /* * Setup the readline command line editing mode based upon the * following order: * * (1) EDITOR environment variable * (2) overridden by any .crashrc entry: "set vi" or "set emacs" * (3) RL_VI_MODE if not set anywhere else */ pc->flags |= READLINE; pc->editing_mode = "no_mode"; if ((p1 = getenv("EDITOR"))) { if (strstr(p1, "vi")) pc->editing_mode = "vi"; if (strstr(p1, "emacs")) pc->editing_mode = "emacs"; } /* * Resolve $HOME .rc file first, then the one in the local directory. * Note that only "set" and "alias" commands are done at this time. */ for (i = 1; i < argc; i++) if (STREQ(argv[i], "--no_crashrc")) pc->flags |= NOCRASHRC; alias_init(NULL); if ((p1 = getenv("HOME"))) { if ((pc->home = (char *)malloc(strlen(p1)+1)) == NULL) { error(INFO, "home directory malloc: %s\n", strerror(errno)); pc->home = "(unknown)"; } else strcpy(pc->home, p1); sprintf(homerc, "%s/.%src", pc->home, pc->program_name); if (!(pc->flags & NOCRASHRC) && file_exists(homerc, NULL)) { if ((afp = fopen(homerc, "r")) == NULL) error(INFO, "cannot open %s: %s\n", homerc, strerror(errno)); else if (untrusted_file(afp, homerc)) fclose(afp); else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCHOME); fclose(afp); } } } sprintf(localrc, ".%src", pc->program_name); if (!same_file(homerc, localrc) && !(pc->flags & NOCRASHRC) && file_exists(localrc, NULL)) { if ((afp = fopen(localrc, "r")) == NULL) error(INFO, "cannot open %s: %s\n", localrc, strerror(errno)); else if (untrusted_file(afp, localrc)) fclose(afp); else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCLOCAL); 
fclose(afp); } } if (STREQ(pc->editing_mode, "no_mode")) pc->editing_mode = "vi"; machdep_init(SETUP_ENV); } /* * "help -p" output */ void dump_program_context(void) { int i; int others = 0; char *p1; char buf[BUFSIZE]; char buf2[BUFSIZE]; fprintf(fp, " program_name: %s\n", pc->program_name); fprintf(fp, " program_path: %s\n", pc->program_path); fprintf(fp, " program_version: %s\n", pc->program_version); fprintf(fp, " gdb_version: %s\n", pc->gdb_version); fprintf(fp, " program_pid: %ld\n", pc->program_pid); fprintf(fp, " prompt: \"%s\"\n", pc->prompt); fprintf(fp, " flags: %llx ", pc->flags); if (pc->flags) sprintf(buf, "("); if (pc->flags & RUNTIME) sprintf(&buf[strlen(buf)], "%sRUNTIME", others++ ? "|" : ""); if (pc->flags & LIVE_SYSTEM) sprintf(&buf[strlen(buf)], "%sLIVE_SYSTEM", others++ ? "|" : ""); if (pc->flags & TTY) sprintf(&buf[strlen(buf)], "%sTTY", others++ ? "|" : ""); if (pc->flags & IN_FOREACH) sprintf(&buf[strlen(buf)], "%sIN_FOREACH", others++ ? "|" : ""); if (pc->flags & MFD_RDWR) sprintf(&buf[strlen(buf)], "%sMFD_RDWR", others++ ? "|" : ""); if (pc->flags & KVMDUMP) sprintf(&buf[strlen(buf)], "%sKVMDUMP", others++ ? "|" : ""); if (pc->flags & SILENT) sprintf(&buf[strlen(buf)], "%sSILENT", others++ ? "|" : ""); if (pc->flags & HASH) sprintf(&buf[strlen(buf)], "%sHASH", others++ ? "|" : ""); if (pc->flags & SCROLL) sprintf(&buf[strlen(buf)], "%sSCROLL", others++ ? "|" : ""); if (pc->flags & NO_CONSOLE) sprintf(&buf[strlen(buf)], "%sNO_CONSOLE", others++ ? "|" : ""); if (pc->flags & MCLXCD) sprintf(&buf[strlen(buf)], "%sMCLXCD", others++ ? "|" : ""); if (pc->flags & RUNTIME_IFILE) sprintf(&buf[strlen(buf)], "%sRUNTIME_IFILE", others++ ? "|" : ""); if (pc->flags & CMDLINE_IFILE) sprintf(&buf[strlen(buf)], "%sCMDLINE_IFILE", others++ ? "|" : ""); if (pc->flags & DROP_CORE) sprintf(&buf[strlen(buf)], "%sDROP_CORE", others++ ? "|" : ""); if (pc->flags & LKCD) sprintf(&buf[strlen(buf)], "%sLKCD", others++ ? 
"|" : ""); if (pc->flags & GDB_INIT) sprintf(&buf[strlen(buf)], "%sGDB_INIT", others++ ? "|" : ""); if (pc->flags & IN_GDB) sprintf(&buf[strlen(buf)], "%sIN_GDB", others++ ? "|" : ""); if (pc->flags & RCHOME_IFILE) sprintf(&buf[strlen(buf)], "%sRCHOME_IFILE", others++ ? "|" : ""); if (pc->flags & RCLOCAL_IFILE) sprintf(&buf[strlen(buf)], "%sRCLOCAL_IFILE", others++ ? "|" : ""); if (pc->flags & READLINE) sprintf(&buf[strlen(buf)], "%sREADLINE", others++ ? "|" : ""); if (pc->flags & _SIGINT_) sprintf(&buf[strlen(buf)], "%s_SIGINT_", others++ ? "|" : ""); if (pc->flags & IN_RESTART) sprintf(&buf[strlen(buf)], "%sIN_RESTART", others++ ? "|" : ""); if (pc->flags & KERNEL_DEBUG_QUERY) sprintf(&buf[strlen(buf)], "%sKERNEL_DEBUG_QUERY", others++ ? "|" : ""); if (pc->flags & DEVMEM) sprintf(&buf[strlen(buf)], "%sDEVMEM", others++ ? "|" : ""); if (pc->flags & MEMMOD) sprintf(&buf[strlen(buf)], "%sMEMMOD", others++ ? "|" : ""); if (pc->flags & MODPRELOAD) sprintf(&buf[strlen(buf)], "%sMODPRELOAD", others++ ? "|" : ""); if (pc->flags & REM_LIVE_SYSTEM) sprintf(&buf[strlen(buf)], "%sREM_LIVE_SYSTEM", others++ ? "|" : ""); if (pc->flags & MEMSRC_LOCAL) sprintf(&buf[strlen(buf)], "%sMEMSRC_LOCAL", others++ ? "|" : ""); if (pc->flags & NAMELIST_LOCAL) sprintf(&buf[strlen(buf)], "%sNAMELIST_LOCAL", others++ ? "|" : ""); if (pc->flags & DUMPFILE_SAVED) sprintf(&buf[strlen(buf)], "%sDUMPFILE_SAVED", others++ ? "|" : ""); if (pc->flags & NAMELIST_SAVED) sprintf(&buf[strlen(buf)], "%sNAMELIST_SAVED", others++ ? "|" : ""); if (pc->flags & UNLINK_NAMELIST) sprintf(&buf[strlen(buf)], "%sUNLINK_NAMELIST", others++ ? "|" : ""); if (pc->flags & NAMELIST_UNLINKED) sprintf(&buf[strlen(buf)], "%sNAMELIST_UNLINKED", others++ ? "|" : ""); if (pc->flags & REM_MCLXCD) sprintf(&buf[strlen(buf)], "%sREM_MCLXCD", others++ ? "|" : ""); if (pc->flags & REM_LKCD) sprintf(&buf[strlen(buf)], "%sREM_LKCD", others++ ? 
"|" : ""); if (pc->flags & NAMELIST_NO_GZIP) sprintf(&buf[strlen(buf)], "%sNAMELIST_NO_GZIP", others++ ? "|" : ""); if (pc->flags & UNLINK_MODULES) sprintf(&buf[strlen(buf)], "%sUNLINK_MODULES", others++ ? "|" : ""); if (pc->flags & S390D) sprintf(&buf[strlen(buf)], "%sS390D", others++ ? "|" : ""); if (pc->flags & REM_S390D) sprintf(&buf[strlen(buf)], "%sREM_S390D", others++ ? "|" : ""); if (pc->flags & NETDUMP) sprintf(&buf[strlen(buf)], "%sNETDUMP", others++ ? "|" : ""); if (pc->flags & XENDUMP) sprintf(&buf[strlen(buf)], "%sXENDUMP", others++ ? "|" : ""); if (pc->flags & KDUMP) sprintf(&buf[strlen(buf)], "%sKDUMP", others++ ? "|" : ""); if (pc->flags & SADUMP) sprintf(&buf[strlen(buf)], "%sSADUMP", others++ ? "|" : ""); if (pc->flags & SYSRQ) sprintf(&buf[strlen(buf)], "%sSYSRQ", others++ ? "|" : ""); if (pc->flags & REM_NETDUMP) sprintf(&buf[strlen(buf)], "%sREM_NETDUMP", others++ ? "|" : ""); if (pc->flags & DISKDUMP) sprintf(&buf[strlen(buf)], "%sDISKDUMP", others++ ? "|" : ""); if (pc->flags & SYSMAP) sprintf(&buf[strlen(buf)], "%sSYSMAP", others++ ? "|" : ""); if (pc->flags & SYSMAP_ARG) sprintf(&buf[strlen(buf)], "%sSYSMAP_ARG", others++ ? "|" : ""); if (pc->flags & DATADEBUG) sprintf(&buf[strlen(buf)], "%sDATADEBUG", others++ ? "|" : ""); if (pc->flags & FINDKERNEL) sprintf(&buf[strlen(buf)], "%sFINDKERNEL", others++ ? "|" : ""); if (pc->flags & VERSION_QUERY) sprintf(&buf[strlen(buf)], "%sVERSION_QUERY", others++ ? "|" : ""); if (pc->flags & READNOW) sprintf(&buf[strlen(buf)], "%sREADNOW", others++ ? "|" : ""); if (pc->flags & NOCRASHRC) sprintf(&buf[strlen(buf)], "%sNOCRASHRC", others++ ? "|" : ""); if (pc->flags & INIT_IFILE) sprintf(&buf[strlen(buf)], "%sINIT_IFILE", others++ ? "|" : ""); if (pc->flags & XEN_HYPER) sprintf(&buf[strlen(buf)], "%sXEN_HYPER", others++ ? "|" : ""); if (pc->flags & XEN_CORE) sprintf(&buf[strlen(buf)], "%sXEN_CORE", others++ ? "|" : ""); if (pc->flags & PLEASE_WAIT) sprintf(&buf[strlen(buf)], "%sPLEASE_WAIT", others++ ? 
"|" : ""); if (pc->flags & IFILE_ERROR) sprintf(&buf[strlen(buf)], "%sIFILE_ERROR", others++ ? "|" : ""); if (pc->flags & MINIMAL_MODE) sprintf(&buf[strlen(buf)], "%sMINIMAL_MODE", others++ ? "|" : ""); if (pc->flags & CRASHBUILTIN) sprintf(&buf[strlen(buf)], "%sCRASHBUILTIN", others++ ? "|" : ""); if (pc->flags & PRELOAD_EXTENSIONS) sprintf(&buf[strlen(buf)], "%sPRELOAD_EXTENSIONS", others++ ? "|" : ""); if (pc->flags & PROC_KCORE) sprintf(&buf[strlen(buf)], "%sPROC_KCORE", others++ ? "|" : ""); if (pc->flags) strcat(buf, ")"); if (strlen(buf)) { if (strlen(buf) > 46) { sprintf(buf2, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); if (strlen(buf2) <= 82) fprintf(fp, "%s", buf2); else { for (i = strlen(buf2)-1; i; i--) { if ((buf2[i] == '|') && (i < 80)) break; } strcpy(buf, buf2); buf[i+1] = NULLCHAR; fprintf(fp, "%s\n %s", buf, &buf2[i+1]); } } else fprintf(fp, "%s\n", buf); } others = 0; fprintf(fp, " flags2: %llx (", pc->flags2); if (pc->flags2 & FLAT) fprintf(fp, "%sFLAT", others++ ? "|" : ""); if (pc->flags2 & ELF_NOTES) fprintf(fp, "%sELF_NOTES", others++ ? "|" : ""); if (pc->flags2 & GET_OSRELEASE) fprintf(fp, "%sGET_OSRELEASE", others++ ? "|" : ""); if (pc->flags2 & REMOTE_DAEMON) fprintf(fp, "%sREMOTE_DAEMON", others++ ? "|" : ""); if (pc->flags2 & LIVE_DUMP) fprintf(fp, "%sLIVE_DUMP", others++ ? "|" : ""); if (pc->flags2 & RADIX_OVERRIDE) fprintf(fp, "%sRADIX_OVERRIDE", others++ ? "|" : ""); if (pc->flags2 & QEMU_MEM_DUMP_ELF) fprintf(fp, "%sQEMU_MEM_DUMP_ELF", others++ ? "|" : ""); if (pc->flags2 & QEMU_MEM_DUMP_COMPRESSED) fprintf(fp, "%sQEMU_MEM_DUMP_COMPRESSED", others++ ? "|" : ""); if (pc->flags2 & GET_LOG) fprintf(fp, "%sGET_LOG", others++ ? "|" : ""); if (pc->flags2 & VMCOREINFO) fprintf(fp, "%sVMCOREINFO", others++ ? "|" : ""); if (pc->flags2 & ALLOW_FP) fprintf(fp, "%sALLOW_FP", others++ ? "|" : ""); if (pc->flags2 & RAMDUMP) fprintf(fp, "%sRAMDUMP", others++ ? "|" : ""); if (pc->flags2 & OFFLINE_HIDE) fprintf(fp, "%sOFFLINE_HIDE", others++ ? 
"|" : ""); if (pc->flags2 & INCOMPLETE_DUMP) fprintf(fp, "%sINCOMPLETE_DUMP", others++ ? "|" : ""); if (pc->flags2 & SNAP) fprintf(fp, "%sSNAP", others++ ? "|" : ""); if (pc->flags2 & EXCLUDED_VMEMMAP) fprintf(fp, "%sEXCLUDED_VMEMMAP", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " namelist: %s\n", pc->namelist); fprintf(fp, " dumpfile: %s\n", pc->dumpfile); fprintf(fp, " live_memsrc: %s\n", pc->live_memsrc); fprintf(fp, " system_map: %s\n", pc->system_map); fprintf(fp, " namelist_debug: %s\n", pc->namelist_debug); fprintf(fp, " debuginfo_file: %s\n", pc->debuginfo_file); fprintf(fp, " namelist_orig: %s\n", pc->namelist_orig); fprintf(fp, "namelist_dbg_orig: %s\n", pc->namelist_debug_orig); fprintf(fp, " kvmdump_mapfile: %s\n", pc->kvmdump_mapfile); fprintf(fp, " memory_module: %s\n", pc->memory_module); fprintf(fp, " memory_device: %s\n", pc->memory_device); fprintf(fp, " machine_type: %s\n", pc->machine_type); fprintf(fp, " editing_mode: %s\n", pc->editing_mode); fprintf(fp, " nfd: %d\n", pc->nfd); fprintf(fp, " mfd: %d\n", pc->mfd); fprintf(fp, " kfd: %d\n", pc->kfd); fprintf(fp, " dfd: %d\n", pc->dfd); fprintf(fp, " confd: %d\n", pc->confd); fprintf(fp, " home: %s\n", pc->home); fprintf(fp, " command_line: "); if (STRNEQ(pc->command_line, args[0])) fprintf(fp, "%s\n", concat_args(buf, 0, FALSE)); else fprintf(fp, "%s\n", pc->command_line); fprintf(fp, " orig_line: %s\n", pc->orig_line); fprintf(fp, " eoc_index: %d\n", pc->eoc_index); fprintf(fp, " readline: %lx\n", (ulong)pc->readline); fprintf(fp, " my_tty: %s\n", pc->my_tty); fprintf(fp, " debug: %ld\n", pc->debug); fprintf(fp, " debug_save: %ld\n", pc->debug_save); fprintf(fp, " console: %s\n", pc->console); fprintf(fp, " redhat_debug_loc: %s\n", pc->redhat_debug_loc); fprintf(fp, " pipefd[2]: %d,%d\n", pc->pipefd[0], pc->pipefd[1]); fprintf(fp, " nullfp: %lx\n", (ulong)pc->nullfp); fprintf(fp, " stdpipe: %lx\n", (ulong)pc->stdpipe); fprintf(fp, " pipe: %lx\n", (ulong)pc->pipe); fprintf(fp, " ifile: 
%lx\n", (ulong)pc->ifile); fprintf(fp, " ofile: %lx\n", (ulong)pc->ofile); fprintf(fp, " ifile_pipe: %lx\n", (ulong)pc->ifile_pipe); fprintf(fp, " ifile_ofile: %lx\n", (ulong)pc->ifile_ofile); fprintf(fp, " args_ifile: %lx\n", (ulong)pc->args_ifile); fprintf(fp, " input_file: %s\n", pc->input_file); fprintf(fp, "ifile_in_progress: %lx (", pc->ifile_in_progress); others = 0; if (pc->ifile_in_progress & RCHOME_IFILE) fprintf(fp, "%sRCHOME_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & RCLOCAL_IFILE) fprintf(fp, "%sRCLOCAL_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & CMDLINE_IFILE) fprintf(fp, "%sCMDLINE_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & RUNTIME_IFILE) fprintf(fp, "%sRUNTIME_IFILE", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " ifile_offset: %lld\n", (ulonglong)pc->ifile_offset); fprintf(fp, "runtime_ifile_cmd: %s\n", pc->runtime_ifile_cmd ? pc->runtime_ifile_cmd : "(unused)"); fprintf(fp, " scroll_command: "); switch (pc->scroll_command) { case SCROLL_NONE: fprintf(fp, "SCROLL_NONE\n"); break; case SCROLL_LESS: fprintf(fp, "SCROLL_LESS\n"); break; case SCROLL_MORE: fprintf(fp, "SCROLL_MORE\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "SCROLL_CRASHPAGER (%s)\n", getenv("CRASHPAGER")); break; } buf[0] = NULLCHAR; fprintf(fp, " redirect: %lx ", pc->redirect); if (pc->redirect) sprintf(buf, "("); others = 0; if (pc->redirect & FROM_COMMAND_LINE) sprintf(&buf[strlen(buf)], "%sFROM_COMMAND_LINE", others++ ? "|" : ""); if (pc->redirect & FROM_INPUT_FILE) sprintf(&buf[strlen(buf)], "%sFROM_INPUT_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_NOT_DONE) sprintf(&buf[strlen(buf)], "%sREDIRECT_NOT_DONE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_PIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_PIPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_STDPIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_STDPIPE", others++ ? 
"|" : ""); if (pc->redirect & REDIRECT_TO_FILE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_FAILURE) sprintf(&buf[strlen(buf)], "%sREDIRECT_FAILURE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_ESCAPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_SHELL_ESCAPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_COMMAND) sprintf(&buf[strlen(buf)], "%sREDIRECT_SHELL_COMMAND", others++ ? "|" : ""); if (pc->redirect & REDIRECT_PID_KNOWN) sprintf(&buf[strlen(buf)], "%sREDIRECT_PID_KNOWN", others++ ? "|" : ""); if (pc->redirect & REDIRECT_MULTI_PIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_MULTI_PIPE", others++ ? "|" : ""); if (pc->redirect) strcat(buf, ")"); if (strlen(buf)) { if (strlen(buf) > 54) fprintf(fp, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); else fprintf(fp, "%s\n", buf); } if (!pc->redirect) fprintf(fp, "\n"); fprintf(fp, " stdpipe_pid: %d\n", pc->stdpipe_pid); fprintf(fp, " pipe_pid: %d\n", pc->pipe_pid); fprintf(fp, " pipe_shell_pid: %d\n", pc->pipe_shell_pid); fprintf(fp, " pipe_command: %s\n", pc->pipe_command); if (pc->symfile && pc->symfile2) { fprintf(fp, " symfile: %lx (%ld)\n", (ulong)pc->symfile, (ulong)ftell(pc->symfile)); fprintf(fp, " symfile2: %lx (%ld)\n", (ulong)pc->symfile2, (ulong)ftell(pc->symfile2)); } else { fprintf(fp, " symfile: %lx \n", (ulong)pc->symfile); fprintf(fp, " symfile2: %lx \n", (ulong)pc->symfile2); } fprintf(fp, " tmpfile: %lx\n", (ulong)pc->tmpfile); fprintf(fp, " saved_fp: %lx\n", (ulong)pc->saved_fp); fprintf(fp, " tmp_fp: %lx\n", (ulong)pc->tmp_fp); fprintf(fp, " tmpfile2: %lx\n", (ulong)pc->tmpfile2); fprintf(fp, " cmd_table: %s\n", XEN_HYPER_MODE() ? 
"xen_hyper_command_table" : "linux_command_table"); fprintf(fp, " curcmd: %s\n", pc->curcmd); fprintf(fp, " lastcmd: %s\n", pc->lastcmd); fprintf(fp, " cur_gdb_cmd: %d %s\n", pc->cur_gdb_cmd, gdb_command_string(pc->cur_gdb_cmd, buf, FALSE)); fprintf(fp, " last_gdb_cmd: %d %s\n", pc->last_gdb_cmd, gdb_command_string(pc->last_gdb_cmd, buf, FALSE)); fprintf(fp, " cur_req: %lx\n", (ulong)pc->cur_req); fprintf(fp, " cmdgencur: %ld\n", pc->cmdgencur); fprintf(fp, " curcmd_flags: %lx (", pc->curcmd_flags); others = 0; if (pc->curcmd_flags & XEN_MACHINE_ADDR) fprintf(fp, "%sXEN_MACHINE_ADDR", others ? "|" : ""); if (pc->curcmd_flags & REPEAT) fprintf(fp, "%sREPEAT", others ? "|" : ""); if (pc->curcmd_flags & IDLE_TASK_SHOWN) fprintf(fp, "%sIDLE_TASK_SHOWN", others ? "|" : ""); if (pc->curcmd_flags & TASK_SPECIFIED) fprintf(fp, "%sTASK_SPECIFIED", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_UVADDR) fprintf(fp, "%sMEMTYPE_UVADDR", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_FILEADDR) fprintf(fp, "%sMEMTYPE_FILEADDR", others ? "|" : ""); if (pc->curcmd_flags & HEADER_PRINTED) fprintf(fp, "%sHEADER_PRINTED", others ? "|" : ""); if (pc->curcmd_flags & BAD_INSTRUCTION) fprintf(fp, "%sBAD_INSTRUCTION", others ? "|" : ""); if (pc->curcmd_flags & UD2A_INSTRUCTION) fprintf(fp, "%sUD2A_INSTRUCTION", others ? "|" : ""); if (pc->curcmd_flags & IRQ_IN_USE) fprintf(fp, "%sIRQ_IN_USE", others ? "|" : ""); if (pc->curcmd_flags & IGNORE_ERRORS) fprintf(fp, "%sIGNORE_ERRORS", others ? "|" : ""); if (pc->curcmd_flags & FROM_RCFILE) fprintf(fp, "%sFROM_RCFILE", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_KVADDR) fprintf(fp, "%sMEMTYPE_KVADDR", others ? "|" : ""); if (pc->curcmd_flags & NO_MODIFY) fprintf(fp, "%sNO_MODIFY", others ? "|" : ""); if (pc->curcmd_flags & MOD_SECTIONS) fprintf(fp, "%sMOD_SECTIONS", others ? "|" : ""); if (pc->curcmd_flags & MOD_READNOW) fprintf(fp, "%sMOD_READNOW", others ? 
"|" : ""); if (pc->curcmd_flags & MM_STRUCT_FORCE) fprintf(fp, "%sMM_STRUCT_FORCE", others ? "|" : ""); if (pc->curcmd_flags & CPUMASK) fprintf(fp, "%sCPUMASK", others ? "|" : ""); if (pc->curcmd_flags & PARTIAL_READ_OK) fprintf(fp, "%sPARTIAL_READ_OK", others ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " curcmd_private: %llx\n", pc->curcmd_private); fprintf(fp, " cmd_cleanup: %lx\n", (ulong)pc->cmd_cleanup); fprintf(fp, " cmd_cleanup_arg: %lx\n", (ulong)pc->cmd_cleanup_arg); fprintf(fp, " sigint_cnt: %d\n", pc->sigint_cnt); fprintf(fp, " sigaction: %lx\n", (ulong)&pc->sigaction); fprintf(fp, " gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction); fprintf(fp, " main_loop_env: %lx\n", (ulong)&pc->main_loop_env); fprintf(fp, " foreach_loop_env: %lx\n", (ulong)&pc->foreach_loop_env); fprintf(fp, "gdb_interface_env: %lx\n", (ulong)&pc->gdb_interface_env); fprintf(fp, " termios_orig: %lx\n", (ulong)&pc->termios_orig); fprintf(fp, " termios_raw: %lx\n", (ulong)&pc->termios_raw); fprintf(fp, " ncmds: %d\n", pc->ncmds); fprintf(fp, " cmdlist: %lx\n", (ulong)pc->cmdlist); fprintf(fp, " cmdlistsz: %d\n", pc->cmdlistsz); fprintf(fp, " output_radix: %d (%s)\n", pc->output_radix, pc->output_radix == 16 ? "hex" : ((pc->output_radix == 10) ? 
"decimal" : "???"));
	/* Remote-analysis (daemon) connection state — presumably used when
	 * crash attaches to a remote crashd server; TODO confirm against
	 * the remote.c code, which is outside this view. */
	fprintf(fp, " server: %s\n", pc->server);
	fprintf(fp, " server_pid: %ld\n", pc->server_pid);
	fprintf(fp, " port: %d\n", pc->port);
	fprintf(fp, " sockfd: %d\n", pc->sockfd);
	fprintf(fp, " server_memsrc: %s\n", pc->server_memsrc);
	fprintf(fp, " server_namelist: %s\n", pc->server_namelist);
	fprintf(fp, " rmfd: %d\n", pc->rmfd);
	fprintf(fp, " rkfd: %d\n", pc->rkfd);
	fprintf(fp, " rcvbufsize: %ld\n", pc->rcvbufsize);
	/* Show the active memory accessor by symbolic name when it is one
	 * of the known handlers, otherwise fall back to the raw pointer. */
	fprintf(fp, " readmem: ");
	if ((p1 = readmem_function_name()))
		fprintf(fp, "%s()\n", p1);
	else
		fprintf(fp, "%lx\n", (ulong)pc->readmem);
	fprintf(fp, " writemem: ");
	if ((p1 = writemem_function_name()))
		fprintf(fp, "%s()\n", p1);
	else
		fprintf(fp, "%lx\n", (ulong)pc->writemem);
	fprintf(fp, " dumpfile memory: %d\n",
		dumpfile_memory(DUMPFILE_MEM_USED));
	fprintf(fp, " curext: %lx\n", (ulong)pc->curext);
	fprintf(fp, " sbrk: %lx\n", (ulong)pc->sbrk);
	fprintf(fp, " cleanup: %s\n", pc->cleanup);
	fprintf(fp, " scope: %lx %s\n", pc->scope,
		pc->scope ? "" : "(not set)");
	fprintf(fp, " nr_hash_queues: %ld\n", pc->nr_hash_queues);
	fprintf(fp, " read_vmcoreinfo: %lx\n", (ulong)pc->read_vmcoreinfo);
}

/*
 * Map the currently-installed pc->readmem function pointer back to its
 * symbolic name for display purposes.  Returns NULL if the pointer does
 * not match any known read handler (continues on the next line).
 */
char *
readmem_function_name(void)
{
	if (pc->readmem == read_dev_mem)
		return("read_dev_mem");
	else if (pc->readmem == read_mclx_dumpfile)
		return("read_mclx_dumpfile");
	else if (pc->readmem == read_lkcd_dumpfile)
		return("read_lkcd_dumpfile");
	else if (pc->readmem == read_daemon)
		return("read_daemon");
	else if (pc->readmem == read_netdump)
		return("read_netdump");
	else if (pc->readmem == read_xendump)
		return("read_xendump");
	else if (pc->readmem == read_kdump)
		return("read_kdump");
	else if (pc->readmem == read_memory_device)
		return("read_memory_device");
	else if (pc->readmem == read_xendump_hyper)
		return("read_xendump_hyper");
	else if (pc->readmem == read_diskdump)
		return("read_diskdump");
	else if (pc->readmem == read_proc_kcore)
		return("read_proc_kcore");
	else if (pc->readmem == read_sadump)
		return("read_sadump");
	else if (pc->readmem == read_s390_dumpfile)
return("read_s390_dumpfile");
	else if (pc->readmem == read_ramdump)
		return("read_ramdump");
	else if (pc->readmem == read_vmware_vmss)
		return("read_vmware_vmss");
	else
		return NULL;	/* unrecognized handler: caller prints raw pointer */
}

/*
 * Map the currently-installed pc->writemem function pointer back to its
 * symbolic name for display purposes.  Mirrors readmem_function_name();
 * returns NULL if the pointer matches no known write handler.
 */
char *
writemem_function_name(void)
{
	if (pc->writemem == write_dev_mem)
		return("write_dev_mem");
	else if (pc->writemem == write_mclx_dumpfile)
		return("write_mclx_dumpfile");
	else if (pc->writemem == write_lkcd_dumpfile)
		return("write_lkcd_dumpfile");
	else if (pc->writemem == write_daemon)
		return("write_daemon");
	else if (pc->writemem == write_netdump)
		return("write_netdump");
	else if (pc->writemem == write_xendump)
		return("write_xendump");
	else if (pc->writemem == write_kdump)
		return("write_kdump");
	else if (pc->writemem == write_memory_device)
		return("write_memory_device");
// else if (pc->writemem == write_xendump_hyper)
// return("write_xendump_hyper");
	else if (pc->writemem == write_diskdump)
		return("write_diskdump");
	else if (pc->writemem == write_proc_kcore)
		return("write_proc_kcore");
	else if (pc->writemem == write_sadump)
		return("write_sadump");
	else if (pc->writemem == write_s390_dumpfile)
		return("write_s390_dumpfile");
	else if (pc->writemem == write_vmware_vmss)
		return("write_vmware_vmss");
	else
		return NULL;
}

/*
 * "help -B" output
 *
 * Dump the compile-time build metadata (generated into build_data.c at
 * build time) to the current output stream.
 */
void
dump_build_data(void)
{
	fprintf(fp, " build_command: %s\n", build_command);
	fprintf(fp, " build_data: %s\n", build_data);
	fprintf(fp, " build_target: %s\n", build_target);
	fprintf(fp, " build_version: %s\n", build_version);
	fprintf(fp, "compiler version: %s\n", compiler_version);
}

/*
 * Perform any cleanup activity here.
*/

/*
 * Central exit path: remove any temporary files created during the
 * session (uncompressed namelists, the registered cleanup file), tear
 * down the memory driver and ramdump state, then exit(status).
 */
int
clean_exit(int status)
{
	if (pc->flags & MEMMOD)
		cleanup_memory_driver();

	/* namelist_orig/namelist_debug_orig being set indicates pc->namelist
	 * and pc->namelist_debug are temporary copies that should be removed. */
	if ((pc->namelist_orig) && file_exists(pc->namelist, NULL))
		unlink(pc->namelist);
	if ((pc->namelist_debug_orig) && file_exists(pc->namelist_debug, NULL))
		unlink(pc->namelist_debug);
	if (pc->cleanup && file_exists(pc->cleanup, NULL))
		unlink(pc->cleanup);

	ramdump_cleanup();
	exit(status);
}

/*
 * Check whether this session is for xen hypervisor analysis:
 * a namelist whose basename starts with "xen-syms" switches the session
 * into XEN_HYPER mode and installs the hypervisor command table.
 */
static void
check_xen_hyper(void)
{
	if (!pc->namelist)
		return;

	if (!XEN_HYPER_MODE()) {
		if (STRNEQ(basename(pc->namelist), "xen-syms"))
			pc->flags |= XEN_HYPER;
		else
			return;
	}

#ifdef XEN_HYPERVISOR_ARCH
	pc->cmd_table = xen_hyper_command_table;
	if (pc->flags & XENDUMP)
		pc->readmem = read_xendump_hyper;
#else
	/* architecture without hypervisor support: fatal */
	error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
}

/*
 * Reject untrusted .crashrc, $HOME/.crashrc,
 * .gdbinit, and $HOME/.gdbinit files.
 */
static char *untrusted_file_list[4] = { 0 };

/*
 * Returns FALSE (trusted) only when the open file is owned by the
 * invoking user and is not world-writable; otherwise records the name
 * for later reporting and returns TRUE (untrusted).
 * NOTE(review): group-writable files owned by the user still pass the
 * check — confirm that is intentional.
 */
int
untrusted_file(FILE *filep, char *filename)
{
	struct stat sbuf;
	int i;

	if (filep && (fstat(fileno(filep), &sbuf) == 0) &&
	    (sbuf.st_uid == getuid()) && !(sbuf.st_mode & S_IWOTH))
		return FALSE;

	/* remember the name in the first free slot (at most 4 rc files
	 * are ever consulted); strdup() failure leaves the slot NULL */
	for (i = 0; i < 4; i++) {
		if (!untrusted_file_list[i]) {
			untrusted_file_list[i] = strdup(filename);
			break;
		}
	}

	return TRUE;
}

/*
 * Emit one warning per recorded untrusted file and release the names.
 * NOTE(review): freed slots are not reset to NULL — safe only if this
 * runs once per session; confirm with the caller.
 */
static void
show_untrusted_files(void)
{
	int i, cnt;

	for (i = cnt = 0; i < 4; i++) {
		if (untrusted_file_list[i]) {
			error(WARNING, "not using untrusted file: \"%s\"\n",
				untrusted_file_list[i]);
			free(untrusted_file_list[i]);
			cnt++;
		}
	}
	if (cnt)
		fprintf(fp, "\n");
}

/*
 * If GET_OSRELEASE is still set, the OS release has been
 * found and displayed.
*/

/*
 * Implements "crash --osrelease <dumpfile>": probe the dumpfile format
 * (flattened, diskdump, kdump); the probe path is expected to display
 * the OS release.  Exits 0 on success, 1 (printing "unknown") otherwise.
 */
static void
get_osrelease(char *dumpfile)
{
	int retval = 1;

	if (is_flattened_format(dumpfile)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	} else if (is_diskdump(dumpfile)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	} else if (is_kdump(dumpfile, KDUMP_LOCAL)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	}

	if (retval)
		fprintf(fp, "unknown\n");

	clean_exit(retval);
}

/*
 * Implements "crash --log <dumpfile>": extract the kernel log buffer
 * from a diskdump or kdump file.  Exits 0 on success; on failure reports
 * the absence of VMCOREINFO data and exits 1.
 */
static void
get_log(char *dumpfile)
{
	int retval = 1;

	if (is_flattened_format(dumpfile))
		pc->flags2 |= FLAT;

	if (is_diskdump(dumpfile)) {
		if (pc->flags2 & GET_LOG)
			retval = 0;
	} else if (is_kdump(dumpfile, KDUMP_LOCAL)) {
		if (pc->flags2 & GET_LOG)
			retval = 0;
	}

	if (retval)
		fprintf(fp, "%s: no VMCOREINFO data\n", dumpfile);

	clean_exit(retval);
}

/*
 * Default pc->read_vmcoreinfo handler: no VMCOREINFO source available.
 */
static char *
no_vmcoreinfo(const char *unused)
{
	return NULL;
}
crash-7.1.4/lkcd_vmdump_v2_v3.h0000775000000000000000000001655212634305150015033 0ustar rootroot/* lkcd_vmdump_v2_v3.h - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Kernel header file for Linux crash dumps.
 *
 * Created by: Matt Robinson (yakker@sgi.com)
 *
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 *
 */

/* This header file includes all structure definitions for crash dumps.
*/ #ifndef _VMDUMP_H #define _VMDUMP_H /* necessary header files */ #ifndef MCLX #include /* for utsname structure */ #include /* for architecture-specific header */ #endif #if defined(ARM) || defined(X86) || defined(PPC) || defined(S390) || \ defined(S390X) || defined(ARM64) || defined(MIPS) /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * */ /* This header file holds the architecture specific crash dump header */ #ifndef _ASM_VMDUMP_H #define _ASM_VMDUMP_H /* necessary header files */ typedef unsigned int u32; #include /* for pt_regs */ /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x1 /* version number */ /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ #ifndef S390 #ifndef S390X #ifndef ARM64 struct pt_regs dha_regs; #endif #endif #endif } dump_header_asm_t; #endif /* _ASM_VMDUMP_H */ #endif /* ARM || X86 || PPC */ #if defined(ALPHA) || defined(IA64) || defined(X86_64) || defined(PPC64) /* * Plug in the real ../arch/alpha/vmdump.h when available. For now the * data here are just placeholders... 
*/ #ifndef IA64 typedef unsigned int u32; #include /* for pt_regs */ #endif /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x1 /* version number */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ #ifndef IA64 struct pt_regs dha_regs; #endif } dump_header_asm_t; #endif /* ALPHA or IA64 (?) */ /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ #ifdef CONFIG_VMDUMP /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_VERSION_NUMBER 0x2 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump flags -- add as necessary */ #define DUMP_RAW 0x1 /* raw page (no compression) */ #define DUMP_COMPRESSED 0x2 /* page is compressed */ #define DUMP_END 0x4 /* end marker on a full dump */ /* dump types - type specific stuff added later for page typing */ #define DUMP_NONE 0 /* no dumping at all -- just bail */ #define DUMP_HEADER 1 /* kernel dump header only */ #define DUMP_KERN 2 /* dump header and kernel pages */ #define DUMP_USED 3 /* dump header, kernel/user pages */ #define DUMP_ALL 4 /* dump header, all memory pages */ /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. 
*/ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) */ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the utsname (uname) information */ struct new_utsname dh_utsname; /* the address of the current task */ struct task_struct *dh_current_task; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. 
*/ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; #endif /* CONFIG_VMDUMP */ #ifdef __KERNEL__ extern void dump_init(uint64_t, uint64_t); extern void dump_open(char *); extern void dump_execute(char *, struct pt_regs *); #endif #endif /* _VMDUMP_H */ crash-7.1.4/mips.c0000664000000000000000000005723112634305150012446 0ustar rootroot/* * mips.c - core analysis suite * * Copyright (C) 2015 Rabin Vincent * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef MIPS #include #include "defs.h" /* From arch/mips/asm/include/pgtable{,-32}.h */ typedef ulong pgd_t; typedef ulong pte_t; #define PTE_ORDER 0 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) #define __PGD_ORDER (32 - 3 * PAGESHIFT() + PGD_T_LOG2 + PTE_T_LOG2) #define PGD_ORDER (__PGD_ORDER >= 0 ? 
__PGD_ORDER : 0)
/* Derived 32-bit MIPS two-level page-table geometry (from
 * arch/mips/include/asm/pgtable-32.h); all sizes depend on the
 * dumpfile's runtime page size via PAGESIZE()/PAGESHIFT(). */
#define PGD_SIZE (PAGESIZE() << PGD_ORDER)
#define PGDIR_SHIFT (2 * PAGESHIFT() + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE ((PAGESIZE() << PTE_ORDER) / sizeof(pte_t))
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pte_offset(address) \
	(((address) >> PAGESHIFT()) & (PTRS_PER_PTE - 1))
/* cpuinfo_mips.options bit: CPU implements the RI/XI (read/execute
 * inhibit) page protection bits, which changes the PTE flag layout. */
#define MIPS_CPU_RIXI 0x00800000llu
/* Offsets into the MIPS32 ELF register note (pt_regs image). */
#define MIPS32_EF_R0 6
#define MIPS32_EF_R29 35
#define MIPS32_EF_R31 37
#define MIPS32_EF_CPU0_EPC 40
static struct machine_specific mips_machine_specific = { 0 };

/*
 * Display page size and the runtime-computed PTE flag bit values.
 */
static void
mips_display_machine_stats(void)
{
	fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
	fprintf(fp, "\n");

/* print a flag only when its bit was actually assigned by
 * mips_init_page_flags() (zero means "not present on this kernel") */
#define PRINT_PAGE_FLAG(flag) \
	if (flag) \
		fprintf(fp, " %14s: %08lx\n", #flag, flag)

	PRINT_PAGE_FLAG(_PAGE_PRESENT);
	PRINT_PAGE_FLAG(_PAGE_READ);
	PRINT_PAGE_FLAG(_PAGE_WRITE);
	PRINT_PAGE_FLAG(_PAGE_ACCESSED);
	PRINT_PAGE_FLAG(_PAGE_MODIFIED);
	PRINT_PAGE_FLAG(_PAGE_GLOBAL);
	PRINT_PAGE_FLAG(_PAGE_VALID);
	PRINT_PAGE_FLAG(_PAGE_NO_READ);
	PRINT_PAGE_FLAG(_PAGE_NO_EXEC);
	PRINT_PAGE_FLAG(_PAGE_DIRTY);
}

/*
 * "mach" command handler: no options are accepted; just display the
 * machine statistics.
 */
static void
mips_cmd_mach(void)
{
	int c;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c) {
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	mips_display_machine_stats();
}

#define PGDIR_OFFSET(X) (((ulong)(X)) & (PGD_SIZE - 1))

/*
 * Compute the runtime PTE flag bit positions.  Kernels >= 4.1 use a
 * fixed layout; older kernels depend on whether the CPU advertises
 * RI/XI support in cpu_data[0].options.
 */
static void
mips_init_page_flags(void)
{
	ulong shift = 0;

	_PAGE_PRESENT = 1UL << shift++;

	if (THIS_KERNEL_VERSION >= LINUX(4,1,0)) {
		_PAGE_WRITE = 1UL << shift++;
		_PAGE_ACCESSED = 1UL << shift++;
		_PAGE_MODIFIED = 1UL << shift++;
		_PAGE_NO_EXEC = 1UL << shift++;
		/* on this layout "readable" is the same bit as NO_READ */
		_PAGE_READ = _PAGE_NO_READ = 1UL << shift++;
	} else {
		ulonglong cpu_options;
		int rixi;
		ulong addr;

		/* read cpu_data[0].options from the dump to detect RI/XI */
		addr = symbol_value("cpu_data") +
			MEMBER_OFFSET("cpuinfo_mips", "options");
		readmem(addr, KVADDR, &cpu_options, sizeof(cpu_options),
"cpu_data[0].options", FAULT_ON_ERROR); rixi = cpu_options & MIPS_CPU_RIXI; if (!rixi) _PAGE_READ = 1UL << shift++; _PAGE_WRITE = 1UL << shift++; _PAGE_ACCESSED = 1UL << shift++; _PAGE_MODIFIED = 1UL << shift++; if (rixi) { _PAGE_NO_EXEC = 1UL << shift++; _PAGE_NO_READ = 1UL << shift++; } } _PAGE_GLOBAL = 1UL << shift++; _PAGE_VALID = 1UL << shift++; _PAGE_DIRTY = 1UL << shift++; _PFN_SHIFT = PAGESHIFT() - 12 + shift + 3; } static int mips_translate_pte(ulong pte, void *physaddr, ulonglong pte64) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf[BUFSIZE]; int present; ulong paddr; int len1, len2, others; present = pte & _PAGE_PRESENT; paddr = (pte >> _PFN_SHIFT) << PAGESHIFT(); if (physaddr) { *(ulong *)physaddr = PAGEBASE(pte); return !!present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE")); if (!present) return !!present; sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); fprintf(fp, "("); others = 0; #define CHECK_PAGE_FLAG(flag) \ if ((_PAGE_##flag) && (pte & _PAGE_##flag)) \ fprintf(fp, "%s" #flag, others++ ? 
"|" : "") if (pte) { CHECK_PAGE_FLAG(PRESENT); CHECK_PAGE_FLAG(READ); CHECK_PAGE_FLAG(WRITE); CHECK_PAGE_FLAG(ACCESSED); CHECK_PAGE_FLAG(MODIFIED); CHECK_PAGE_FLAG(GLOBAL); CHECK_PAGE_FLAG(VALID); CHECK_PAGE_FLAG(NO_READ); CHECK_PAGE_FLAG(NO_EXEC); CHECK_PAGE_FLAG(DIRTY); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return !!present; } static int mips_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong invalid_pte_table = symbol_value("invalid_pte_table"); ulong *page_dir; ulong pgd_pte, page_table; ulong pte; ulong pbase; if (verbose) { const char *segment; if (vaddr < 0x80000000lu) segment = "useg"; else if (vaddr < 0xa0000000lu) segment = "kseg0"; else if (vaddr < 0xc0000000lu) segment = "kseg1"; else if (vaddr < 0xe0000000lu) segment = "ksseg"; else segment = "kseg3"; fprintf(fp, "SEGMENT: %s\n", segment); } if (vaddr >= 0x80000000lu && vaddr < 0xc0000000lu) { *paddr = VTOP(vaddr); return TRUE; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + pgd_index(vaddr); FILL_PGD(PAGEBASE(pgd), KVADDR, PGD_SIZE); pgd_pte = ULONG(machdep->pgd + PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %08lx => %lx\n", (ulong)page_dir, pgd_pte); if (pgd_pte == invalid_pte_table) { fprintf(fp, "invalid\n"); return FALSE; } page_table = VTOP(pgd_pte) + sizeof(pte_t) * pte_offset(vaddr); FILL_PTBL(PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %08lx => %08lx\n", page_table, pte); if (!(pte & _PAGE_PRESENT)) { if (verbose) { fprintf(fp, "\n"); mips_translate_pte((ulong)pte, 0, pte); } return FALSE; } pbase = (pte >> _PFN_SHIFT) << PAGESHIFT(); *paddr = pbase + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %08lx\n\n", pbase); mips_translate_pte(pte, 0, 0); } return TRUE; } static int mips_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!tc) error(FATAL, "current context 
invalid\n"); if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { ulong active_mm; readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { ulong mm; mm = task_mm(tc->task, TRUE); if (mm) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } return mips_pgd_vtop(pgd, vaddr, paddr, verbose); } static int mips_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if (!verbose && !IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } return mips_pgd_vtop((ulong *)vt->kernel_pgd[0], kvaddr, paddr, verbose); } static void mips_dump_exception_stack(struct bt_info *bt, char *pt_regs) { struct mips_pt_regs_main *mains; struct mips_pt_regs_cp0 *cp0; int i; char buf[BUFSIZE]; mains = (struct mips_pt_regs_main *) (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); for (i = 0; i < 32; i += 4) { fprintf(fp, " $%2d : %08lx %08lx %08lx %08lx\n", i, mains->regs[i], mains->regs[i+1], mains->regs[i+2], mains->regs[i+3]); } fprintf(fp, " Hi : %08lx\n", mains->hi); fprintf(fp, " Lo : %08lx\n", mains->lo); value_to_symstr(cp0->cp0_epc, buf, 16); fprintf(fp, " epc : %08lx %s\n", cp0->cp0_epc, buf); value_to_symstr(mains->regs[31], buf, 16); fprintf(fp, " ra : %08lx %s\n", mains->regs[31], buf); fprintf(fp, " Status: %08lx\n", mains->cp0_status); fprintf(fp, " Cause : %08lx\n", cp0->cp0_cause); fprintf(fp, " BadVA : %08lx\n", cp0->cp0_badvaddr); } struct mips_unwind_frame { ulong sp; ulong pc; ulong ra; }; static void mips_display_full_frame(struct bt_info *bt, struct mips_unwind_frame 
*current, struct mips_unwind_frame *previous) { ulong words, addr; ulong *up; char buf[BUFSIZE]; int i, u_idx; if (!INSTACK(previous->sp, bt) || !INSTACK(current->sp, bt)) return; words = (previous->sp - current->sp) / sizeof(ulong); if (words == 0) { fprintf(fp, " (no frame)\n"); return; } addr = current->sp; u_idx = (current->sp - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if ((i % 4) == 0) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx * sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static int mips_is_exception_entry(struct syment *sym) { return STREQ(sym->name, "ret_from_exception") || STREQ(sym->name, "ret_from_irq") || STREQ(sym->name, "work_resched") || STREQ(sym->name, "handle_sys"); } static void mips_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct mips_unwind_frame *current, struct mips_unwind_frame *previous, int level) { const char *name = sym->name; struct load_module *lm; char *name_plus_offset; char buf[BUFSIZE]; name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { struct syment *symp; ulong symbol_offset; symp = value_search(current->pc, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(current->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, current->sp, name_plus_offset ? 
name_plus_offset : name, current->pc); if (module_symbol(current->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { char buf[BUFSIZE]; get_line_number(current->pc, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (mips_is_exception_entry(sym)) { char pt_regs[SIZE(pt_regs)]; GET_STACK_DATA(current->sp, &pt_regs, SIZE(pt_regs)); mips_dump_exception_stack(bt, pt_regs); } if (bt->flags & BT_FULL) { fprintf(fp, " " "[PC: %08lx RA: %08lx SP: %08lx SIZE: %ld]\n", current->pc, current->ra, current->sp, previous->sp - current->sp); mips_display_full_frame(bt, current, previous); } } static void mips_analyze_function(ulong start, ulong offset, struct mips_unwind_frame *current, struct mips_unwind_frame *previous) { ulong rapos = 0; ulong spadjust = 0; ulong *funcbuf, *ip; ulong i; if (CRASHDEBUG(8)) fprintf(fp, "%s: start %#lx offset %#lx\n", __func__, start, offset); if (!offset) { previous->sp = current->sp; return; } ip = funcbuf = (ulong *)GETBUF(offset); if (!readmem(start, KVADDR, funcbuf, offset, "mips_analyze_function", RETURN_ON_ERROR)) { FREEBUF(funcbuf); error(FATAL, "Cannot read function at %8x", start); return; } for (i = 0; i < offset; i += 4) { ulong insn = *ip; ulong high = (insn >> 16) & 0xffff; ulong low = insn & 0xffff; if (CRASHDEBUG(8)) fprintf(fp, "insn @ %#lx = %#lx\n", start + i, insn); if (high == 0x27bd) { /* ADDIU sp, sp, imm */ if (!(low & 0x8000)) break; spadjust += 0x10000 - low; if (CRASHDEBUG(8)) fprintf(fp, "spadjust = %lu\n", spadjust); } else if (high == 0xafbf) { /* SW RA, imm(SP) */ rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } ip++; } FREEBUF(funcbuf); previous->sp = current->sp + spadjust; if (rapos && !readmem(rapos, KVADDR, ¤t->ra, sizeof(current->ra), "RA from stack", RETURN_ON_ERROR)) { error(FATAL, "Cannot read RA from stack %lx", rapos); return; } } static void mips_back_trace_cmd(struct bt_info *bt) { struct 
mips_unwind_frame current, previous; int level = 0; previous.sp = previous.pc = previous.ra = 0; current.pc = bt->instptr; current.sp = bt->stkptr; current.ra = 0; if (bt->machdep) { struct mips_regset *regs = bt->machdep; previous.pc = current.ra = regs->regs[MIPS32_EF_R31]; } while (INSTACK(current.sp, bt)) { struct syment *symbol; ulong offset; if (CRASHDEBUG(8)) fprintf(fp, "level %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); if (!IS_KVADDR(current.pc)) return; symbol = value_search(current.pc, &offset); if (!symbol) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } /* * If we get an address which points to the start of a * function, then it could one of the following: * * - we are dealing with a noreturn function. The last call * from a noreturn function has an an ra which points to the * start of the function after it. This is common in the * oops callchain because of die() which is annotated as * noreturn. * * - we have taken an exception at the start of this function. * In this case we already have the RA in current.ra. 
* * - we are in one of these routines which appear with zero * offset in manually-constructed stack frames: * * * ret_from_exception * * ret_from_irq * * ret_from_fork * * ret_from_kernel_thread */ if (!current.ra && !offset && !STRNEQ(symbol->name, "ret_from")) { if (CRASHDEBUG(8)) fprintf(fp, "zero offset at %s, try previous symbol\n", symbol->name); symbol = value_search(current.pc - 4, &offset); if (!symbol) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } } if (mips_is_exception_entry(symbol)) { struct mips_pt_regs_main *mains; struct mips_pt_regs_cp0 *cp0; char pt_regs[SIZE(pt_regs)]; mains = (struct mips_pt_regs_main *) \ (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); GET_STACK_DATA(current.sp, pt_regs, sizeof(pt_regs)); previous.ra = mains->regs[31]; previous.sp = mains->regs[29]; current.ra = cp0->cp0_epc; if (CRASHDEBUG(8)) fprintf(fp, "exception pc %#lx ra %#lx sp %lx\n", previous.pc, previous.ra, previous.sp); } else { mips_analyze_function(symbol->value, offset, ¤t, &previous); } mips_dump_backtrace_entry(bt, symbol, ¤t, &previous, level++); if (!current.ra) break; current.pc = current.ra; current.sp = previous.sp; current.ra = previous.ra; previous.sp = previous.pc = previous.ra = 0; } } static void mips_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { struct mips_regset *regs; regs = bt->machdep; if (!regs) { fprintf(fp, "0%lx: Register values not available\n", bt->task); return; } if (nip) *nip = regs->regs[MIPS32_EF_CPU0_EPC]; if (ksp) *ksp = regs->regs[MIPS32_EF_R29]; } static int mips_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (!bt->tc || !(tt->flags & THREAD_INFO)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_reg31), KVADDR, pcp, sizeof(*pcp), "thread_struct.regs31", RETURN_ON_ERROR)) { return FALSE; } if (!readmem(bt->task + OFFSET(task_struct_thread_reg29), KVADDR, spp, sizeof(*spp), "thread_struct.regs29", 
RETURN_ON_ERROR)) { return FALSE; } return TRUE; } static void mips_stackframe_init(void) { long task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); long thread_reg29 = MEMBER_OFFSET("thread_struct", "reg29"); long thread_reg31 = MEMBER_OFFSET("thread_struct", "reg31"); if ((task_struct_thread == INVALID_OFFSET) || (thread_reg29 == INVALID_OFFSET) || (thread_reg31 == INVALID_OFFSET)) { error(FATAL, "cannot determine thread_struct offsets\n"); return; } ASSIGN_OFFSET(task_struct_thread_reg29) = task_struct_thread + thread_reg29; ASSIGN_OFFSET(task_struct_thread_reg31) = task_struct_thread + thread_reg31; STRUCT_SIZE_INIT(pt_regs, "pt_regs"); MEMBER_OFFSET_INIT(pt_regs_regs, "pt_regs", "regs"); MEMBER_OFFSET_INIT(pt_regs_cp0_badvaddr, "pt_regs", "cp0_badvaddr"); } static void mips_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { *pcp = 0; *spp = 0; if (DUMPFILE() && is_task_active(bt->task)) mips_dumpfile_stack_frame(bt, pcp, spp); else mips_get_frame(bt, pcp, spp); } static int mips_eframe_search(struct bt_info *bt) { return error(FATAL, "%s: not implemented\n", __func__); } static ulong mips_get_task_pgd(ulong task) { return error(FATAL, "%s: not implemented\n", __func__); } static int mips_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } static ulong mips_processor_speed(void) { return 0; } static int mips_get_smp_cpus(void) { return (get_cpus_online() > 0) ? 
get_cpus_online() : kt->cpus; } static ulong mips_vmalloc_start(void) { return first_vmalloc_address(); } static int mips_verify_symbol(const char *name, ulong value, char type) { if (STREQ(name, "_text")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STRNEQ(name, "__func__.") && !STRNEQ(name, "__crc_")); } void mips_dump_machdep_table(ulong arg) { int others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT); fprintf(fp, " ptrs_per_pgd: %lu\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %d\n", PTRS_PER_PTE); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: mips_eframe_search()\n"); fprintf(fp, " back_trace: mips_back_trace_cmd()\n"); fprintf(fp, " processor_speed: mips_processor_speed()\n"); fprintf(fp, " uvtop: mips_uvtop()\n"); fprintf(fp, " kvtop: mips_kvtop()\n"); fprintf(fp, " get_task_pgd: mips_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " get_stack_frame: mips_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: mips_translate_pte()\n"); fprintf(fp, " 
memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: mips_vmalloc_start()\n"); fprintf(fp, " is_task_addr: mips_is_task_addr()\n"); fprintf(fp, " verify_symbol: mips_verify_symbol()\n"); fprintf(fp, " dis_filter: generic_dis_filter()\n"); fprintf(fp, " cmd_mach: mips_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: mips_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: NULL\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static ulong mips_get_page_size(void) { struct syment *spd, *next = NULL; spd = symbol_search("swapper_pg_dir"); if (spd) next = next_symbol(NULL, spd); if (!spd || !next) return memory_page_size(); return next->value - spd->value; } void mips_init(int when) { #if defined(__i386__) || defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the MIPS architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf32_notes; break; case PRE_SYMTAB: machdep->verify_symbol = mips_verify_symbol; machdep->machspec = &mips_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = 
generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: machdep->pagesize = mips_get_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); if (machdep->pagesize >= 16384) machdep->stacksize = machdep->pagesize; else machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = malloc(PGD_SIZE)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->ptbl = malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->kvbase = 0x80000000; machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->uvtop = mips_uvtop; machdep->kvtop = mips_kvtop; machdep->vmalloc_start = mips_vmalloc_start; machdep->eframe_search = mips_eframe_search; machdep->back_trace = mips_back_trace_cmd; machdep->processor_speed = mips_processor_speed; machdep->get_task_pgd = mips_get_task_pgd; machdep->get_stack_frame = mips_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = mips_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = mips_is_task_addr; machdep->dis_filter = generic_dis_filter; machdep->cmd_mach = mips_cmd_mach; machdep->get_smp_cpus = mips_get_smp_cpus; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: mips_init_page_flags(); machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); mips_stackframe_init(); break; } } #endif /* MIPS */ crash-7.1.4/unwind_arm.c0000664000000000000000000004613612634305150013643 0ustar rootroot/* * Stack unwinding support for ARM * * This code is derived from the kernel source: * 
arch/arm/kernel/unwind.c * Copyright (C) 2008 ARM Limited * * Created by: Mika Westerberg * Copyright (C) 2010 Nokia Corporation * * For more information about ARM unwind tables see "Exception handling ABI for * the ARM architecture" document at: * * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef ARM #include "defs.h" /** * struct unwind_idx - index table entry * @addr: prel31 offset to the start of the function * @insn: index table entry. * * @insn can be encoded as follows: * 1. if bit31 is clear this points to the start of the EHT entry * (prel31 offset) * 2. if bit31 is set, this contains the EHT entry itself * 3. if 0x1, cannot unwind. */ struct unwind_idx { ulong addr; ulong insn; }; /** * struct unwind_table - per-module unwind table * @idx: pointer to the star of the unwind table * @start: pointer to the start of the index table * @end: pointer to the last element +1 of the index table * @begin_addr: start address which this table covers * @end_addr: end address which this table covers * @kv_base: kernel virtual address of the start of the index table * * Kernel stores per-module unwind tables in this format. There can be more than * one table per module as we have different ELF sections in the module. */ struct unwind_table { struct unwind_idx *idx; struct unwind_idx *start; struct unwind_idx *end; ulong begin_addr; ulong end_addr; ulong kv_base; }; /* * Unwind table pointers to master kernel table and for modules. 
*/ static struct unwind_table *kernel_unwind_table; static struct unwind_table *module_unwind_tables; struct unwind_ctrl_block { ulong vrs[16]; ulong insn; ulong insn_kvaddr; int entries; int byte; }; struct stackframe { ulong fp; ulong sp; ulong lr; ulong pc; }; enum regs { FP = 11, SP = 13, LR = 14, PC = 15, }; static int init_kernel_unwind_table(void); static int read_module_unwind_table(struct unwind_table *, ulong); static int init_module_unwind_tables(void); static int unwind_get_insn(struct unwind_ctrl_block *); static ulong unwind_get_byte(struct unwind_ctrl_block *); static ulong get_value_from_stack(ulong *); static int unwind_exec_insn(struct unwind_ctrl_block *); static int is_core_kernel_text(ulong); static struct unwind_table *search_table(ulong); static struct unwind_idx *search_index(const struct unwind_table *, ulong); static ulong prel31_to_addr(ulong, ulong); static void index_prel31_to_addr(struct unwind_table *); static int unwind_frame(struct stackframe *, ulong); /* * Function reads in-memory kernel and module unwind tables and makes * local copy of them for unwinding. If unwinding tables cannot be found, this * function returns FALSE, otherwise TRUE. */ int init_unwind_tables(void) { if (!symbol_exists("__start_unwind_idx") || !symbol_exists("__stop_unwind_idx") || !symbol_exists("__start_unwind_tab") || !symbol_exists("__stop_unwind_tab") || !symbol_exists("unwind_tables")) { return FALSE; } if (!init_kernel_unwind_table()) { error(WARNING, "UNWIND: failed to initialize kernel unwind table\n"); return FALSE; } /* * Initialize symbols for per-module unwind tables. Actually there are * several tables per module (one per code section). 
*/ STRUCT_SIZE_INIT(unwind_table, "unwind_table"); MEMBER_OFFSET_INIT(unwind_table_list, "unwind_table", "list"); MEMBER_OFFSET_INIT(unwind_table_start, "unwind_table", "start"); MEMBER_OFFSET_INIT(unwind_table_stop, "unwind_table", "stop"); MEMBER_OFFSET_INIT(unwind_table_begin_addr, "unwind_table", "begin_addr"); MEMBER_OFFSET_INIT(unwind_table_end_addr, "unwind_table", "end_addr"); STRUCT_SIZE_INIT(unwind_idx, "unwind_idx"); MEMBER_OFFSET_INIT(unwind_idx_addr, "unwind_idx", "addr"); MEMBER_OFFSET_INIT(unwind_idx_insn, "unwind_idx", "insn"); if (!init_module_unwind_tables()) { error(WARNING, "UNWIND: failed to initialize module unwind tables\n"); } /* * We abuse DWARF_UNWIND flag a little here as ARM unwinding tables are * not in DWARF format but we can use the flags to indicate that we have * unwind tables support ready. */ kt->flags |= DWARF_UNWIND_CAPABLE; kt->flags |= DWARF_UNWIND; return TRUE; } /* * Allocate and fill master kernel unwind table. */ static int init_kernel_unwind_table(void) { ulong idx_start, idx_end, idx_size; kernel_unwind_table = calloc(sizeof(*kernel_unwind_table), 1); if (!kernel_unwind_table) return FALSE; idx_start = symbol_value("__start_unwind_idx"); idx_end = symbol_value("__stop_unwind_idx"); idx_size = idx_end - idx_start; kernel_unwind_table->idx = calloc(idx_size, 1); if (!kernel_unwind_table->idx) goto fail; /* now read in the index table */ if (!readmem(idx_start, KVADDR, kernel_unwind_table->idx, idx_size, "master kernel unwind table", RETURN_ON_ERROR)) { free(kernel_unwind_table->idx); goto fail; } /* * Kernel versions before v3.2 (specifically, before commit * de66a979012db "ARM: 7187/1: fix unwinding for XIP kernels") * converted the prel31 offsets in the unwind index table to absolute * addresses on startup. Newer kernels don't perform this conversion, * and have a slightly more involved search algorithm. 
* * We always just use the older search method (a straightforward binary * search) and convert the index table offsets ourselves if we detect * that the kernel didn't do it. */ machdep->machspec->unwind_index_prel31 = !is_kernel_text(kernel_unwind_table->idx[0].addr); kernel_unwind_table->start = kernel_unwind_table->idx; kernel_unwind_table->end = (struct unwind_idx *) ((char *)kernel_unwind_table->idx + idx_size); kernel_unwind_table->begin_addr = kernel_unwind_table->start->addr; kernel_unwind_table->end_addr = (kernel_unwind_table->end - 1)->addr; kernel_unwind_table->kv_base = idx_start; if (machdep->machspec->unwind_index_prel31) index_prel31_to_addr(kernel_unwind_table); if (CRASHDEBUG(1)) { fprintf(fp, "UNWIND: master kernel table start\n"); fprintf(fp, "UNWIND: size : %ld\n", idx_size); fprintf(fp, "UNWIND: start : %p\n", kernel_unwind_table->start); fprintf(fp, "UNWIND: end : %p\n", kernel_unwind_table->end); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", kernel_unwind_table->begin_addr); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", kernel_unwind_table->end_addr); fprintf(fp, "UNWIND: master kernel table end\n"); } return TRUE; fail: free(kernel_unwind_table); return FALSE; } /* * Read single module unwind table from addr. */ static int read_module_unwind_table(struct unwind_table *tbl, ulong addr) { ulong idx_start, idx_stop, idx_size; char *buf; buf = GETBUF(SIZE(unwind_table)); /* * First read in the unwind table for this module. It then contains * pointers to the index table which we will read later. */ if (!readmem(addr, KVADDR, buf, SIZE(unwind_table), "module unwind table", RETURN_ON_ERROR)) { error(WARNING, "UNWIND: cannot read unwind table\n"); goto fail; } #define TABLE_VALUE(b, offs) (*((ulong *)((b) + OFFSET(offs)))) idx_start = TABLE_VALUE(buf, unwind_table_start); idx_stop = TABLE_VALUE(buf, unwind_table_stop); idx_size = idx_stop - idx_start; /* * We know the size of the index table. 
Allocate memory for * the table and read the contents from the kernel memory. */ tbl->idx = calloc(idx_size, 1); if (!tbl->idx) goto fail; if (!readmem(idx_start, KVADDR, tbl->idx, idx_size, "module unwind index table", RETURN_ON_ERROR)) { free(tbl->idx); goto fail; } tbl->start = &tbl->idx[0]; tbl->end = (struct unwind_idx *)((char *)tbl->start + idx_size); tbl->begin_addr = TABLE_VALUE(buf, unwind_table_begin_addr); tbl->end_addr = TABLE_VALUE(buf, unwind_table_end_addr); tbl->kv_base = idx_start; if (machdep->machspec->unwind_index_prel31) index_prel31_to_addr(tbl); if (CRASHDEBUG(1)) { fprintf(fp, "UNWIND: module table start\n"); fprintf(fp, "UNWIND: start : %p\n", tbl->start); fprintf(fp, "UNWIND: end : %p\n", tbl->end); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", tbl->begin_addr); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", tbl->end_addr); fprintf(fp, "UNWIND: module table end\n"); } FREEBUF(buf); return TRUE; fail: FREEBUF(buf); return FALSE; } /* * Allocate and fill per-module unwind tables. */ static int init_module_unwind_tables(void) { ulong head = symbol_value("unwind_tables"); struct unwind_table *tbl; struct list_data ld; ulong *table_list; int cnt, i, n; BZERO(&ld, sizeof(ld)); ld.start = head; ld.member_offset = OFFSET(unwind_table_list); ld.flags = RETURN_ON_LIST_ERROR; if (CRASHDEBUG(1)) ld.flags |= VERBOSE; /* * Iterate through unwind table list and store start address of each * table in table_list. 
*/ hq_open(); cnt = do_list(&ld); if (cnt == -1) { error(WARNING, "UNWIND: failed to gather unwind_table list\n"); hq_close(); return FALSE; } table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(table_list, cnt); hq_close(); module_unwind_tables = calloc(sizeof(struct unwind_table), cnt); if (!module_unwind_tables) { error(WARNING, "UNWIND: failed to allocate memory for (%d tables)\n", cnt); FREEBUF(table_list); return FALSE; } /* we skip the first address as it is just head pointer */ for (i = 1, n = 0; i < cnt; i++, n++) { tbl = &module_unwind_tables[n]; if (!read_module_unwind_table(tbl, table_list[i])) goto fail; } /* just in case, zero the last entry (again) */ BZERO(&module_unwind_tables[n], sizeof(module_unwind_tables[n])); FREEBUF(table_list); return TRUE; fail: FREEBUF(table_list); while (--n >= 0) { tbl = &module_unwind_tables[n]; free(tbl->idx); } free(module_unwind_tables); module_unwind_tables = NULL; return FALSE; } /* * Read next unwind instruction pointed by ctrl->insn_kvaddr into * ctrl->insn. As a side-effect, increase the ctrl->insn_kvaddr to * point to the next instruction. */ static int unwind_get_insn(struct unwind_ctrl_block *ctrl) { if (readmem(ctrl->insn_kvaddr, KVADDR, &ctrl->insn, sizeof(ctrl->insn), "unwind insn", RETURN_ON_ERROR)) { ctrl->insn_kvaddr += sizeof(ctrl->insn); return TRUE; } return FALSE; } /* * Return next insn byte from ctl or 0 in case of failure. As a side-effect, * changes ctrl according the next byte. */ static ulong unwind_get_byte(struct unwind_ctrl_block *ctrl) { ulong ret; if (ctrl->entries <= 0) { error(WARNING, "UNWIND: corrupt unwind entry\n"); return 0; } ret = (ctrl->insn >> (ctrl->byte * 8)) & 0xff; if (!ctrl->byte && --ctrl->entries > 0) { if (!unwind_get_insn(ctrl)) return 0; ctrl->byte = 3; } else { ctrl->byte--; } return ret; } /* * Gets one value from stack pointed by vsp. 
 */
static ulong
get_value_from_stack(ulong *vsp)
{
	ulong val;

	/*
	 * We just read the value from kernel memory instead of peeking it
	 * from the bt->stack.
	 */
	if (!readmem((ulong)vsp, KVADDR, &val, sizeof(val),
	    "unwind stack value", RETURN_ON_ERROR)) {
		error(FATAL, "unwind: failed to read value from stack\n");
	}

	return val;
}

/*
 * Execute the next unwind instruction (ARM EHABI personality routine
 * bytecode).  Updates the virtual register set in ctrl->vrs.
 * Returns TRUE on success, FALSE if the instruction is invalid or
 * unsupported.
 */
static int
unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	ulong insn = unwind_get_byte(ctrl);

	if ((insn & 0xc0) == 0) {
		/*
		 * 00xx xxxx: vsp = vsp + (xx xxxx << 2) + 4
		 *
		 * Note that it seems that there is a typo in the spec and
		 * this is corrected in kernel.
		 */
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xc0) == 0x40) {
		/* 01xx xxxx: vsp = vsp - (xx xxxx << 2) - 4 */
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xf0) == 0x80) {
		/*
		 * Pop up to 12 integer registers under masks
		 * {r15-r12}, {r11-r4}.
		 */
		ulong mask;
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int load_sp, reg = 4;

		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			/* 1000 0000 0000 0000 means "refuse to unwind" */
			error(WARNING, "UNWIND: refuse to unwind\n");
			return FALSE;
		}

		/* pop {r4-r15} according to mask */
		load_sp = mask & (1 << (13 - 4));	/* was SP popped? */
		while (mask) {
			if (mask & 1)
				ctrl->vrs[reg] = get_value_from_stack(vsp++);
			mask >>= 1;
			reg++;
		}
		/* only advance vsp into SP if SP itself was not popped */
		if (!load_sp)
			ctrl->vrs[SP] = (ulong)vsp;
	} else if ((insn & 0xf0) == 0x90 &&
	    (insn & 0x0d) != 0x0d) {
		/* 1001 nnnn: set vsp = r[nnnn] (nnnn != 13, 15 reserved) */
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
	} else if ((insn & 0xf0) == 0xa0) {
		/*
		 * 1010 0nnn: pop r4-r[4+nnn]
		 * 1010 1nnn: pop r4-r[4+nnn], r14
		 */
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int reg;

		/* pop R4-R[4+bbb] */
		for (reg = 4; reg <= 4 + (insn & 7); reg++)
			ctrl->vrs[reg] = get_value_from_stack(vsp++);
		if (insn & 0x80)
			ctrl->vrs[14] = get_value_from_stack(vsp++);
		ctrl->vrs[SP] = (ulong)vsp;
	} else if (insn == 0xb0) {
		/* 1011 0000: finish */
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		/* 1011 0001 0000 xxxx: pop r0-r3 under mask {r3,r2,r1,r0} */
		ulong mask = unwind_get_byte(ctrl);
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int reg = 0;

		if (mask == 0 || mask & 0xf0) {
			/* mask 0 and high-nibble forms are spare/reserved */
			error(WARNING, "UNWIND: spare error\n");
			return FALSE;
		}

		/* pop r0-r3 according to mask */
		while (mask) {
			if (mask & 1)
				ctrl->vrs[reg] = get_value_from_stack(vsp++);
			mask >>= 1;
			reg++;
		}
		ctrl->vrs[SP] = (ulong)vsp;
	} else if (insn == 0xb2) {
		/* 1011 0010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2) */
		ulong uleb128 = unwind_get_byte(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		error(WARNING, "UNWIND: unhandled instruction: %02lx\n",
		    insn);
		return FALSE;
	}

	return TRUE;
}

/*
 * Return TRUE if pc lies within the core (non-module) kernel text
 * region recorded in the machine-specific data; FALSE if the bounds
 * are unknown or pc falls outside them.
 */
static int
is_core_kernel_text(ulong pc)
{
	ulong text_start = machdep->machspec->kernel_text_start;
	ulong text_end = machdep->machspec->kernel_text_end;

	if (text_start && text_end)
		return (pc >= text_start && pc <= text_end);

	return FALSE;
}

/*
 * Find the unwind table covering the given instruction address:
 * the master kernel table for core kernel text, otherwise scan the
 * module tables (terminated by a zeroed entry with a NULL idx).
 */
static struct unwind_table *
search_table(ulong ip)
{
	/*
	 * First check if this address is in the master kernel unwind
	 * table or some of the module unwind tables.
	 */
	if (is_core_kernel_text(ip)) {
		return kernel_unwind_table;
	} else if (module_unwind_tables) {
		struct unwind_table *tbl;

		for (tbl = &module_unwind_tables[0]; tbl->idx; tbl++) {
			if (ip >= tbl->begin_addr && ip < tbl->end_addr)
				return tbl;
		}
	}

	return NULL;
}

/*
 * Return the unwind index entry whose start address is the greatest
 * one not exceeding ip.
 */
static struct unwind_idx *
search_index(const struct unwind_table *tbl, ulong ip)
{
	struct unwind_idx *start = tbl->start;
	struct unwind_idx *end = tbl->end;

	/*
	 * Do a binary search for the addresses in the index table.
	 * Addresses are guaranteed to be sorted in ascending order.
	 */
	while (start < end - 1) {
		struct unwind_idx *mid = start + ((end - start + 1) >> 1);

		if (ip < mid->addr)
			end = mid;
		else
			start = mid;
	}

	return start;
}

/*
 * Convert a prel31 symbol to an absolute kernel virtual address.
 */
static ulong
prel31_to_addr(ulong addr, ulong insn)
{
	/*
	 * sign extend to 32 bits
	 *
	 * NOTE(review): on a 64-bit host ulong/long are 64 bits, so
	 * "((long)insn << 1) >> 1" propagates bit 62, not the prel31 sign
	 * bit (bit 30) — verify that insn is already sign-extended (or the
	 * build is 32-bit-only) before relying on negative offsets.
	 */
	long offset = ((long)insn << 1) >> 1;

	return addr + offset;
}

/*
 * Convert every prel31 addr field of the (local) copy of an unwind
 * index table in place to an absolute kernel virtual address, using
 * the table's original kernel base address kv_base as the reference
 * point for each entry.
 */
static void
index_prel31_to_addr(struct unwind_table *tbl)
{
	struct unwind_idx *idx = tbl->start;
	ulong kvaddr = tbl->kv_base;

	for (; idx < tbl->end; idx++, kvaddr += sizeof(struct unwind_idx))
		idx->addr = prel31_to_addr(kvaddr, idx->addr);
}

/*
 * Unwind one stack frame: look up the unwind table and index entry for
 * frame->pc, execute the associated EHT unwind instructions, and update
 * frame (fp/sp/lr/pc) with the caller's register values.  Returns TRUE
 * on success, FALSE if the frame cannot be unwound.
 */
static int
unwind_frame(struct stackframe *frame, ulong stacktop)
{
	const struct unwind_table *tbl;
	struct unwind_ctrl_block ctrl;
	struct unwind_idx *idx;
	ulong low, high;

	/* sanity bounds for the virtual SP while executing instructions */
	low = frame->sp;
	high = stacktop;

	if (!is_kernel_text(frame->pc))
		return FALSE;

	tbl = search_table(frame->pc);
	if (!tbl) {
		error(WARNING, "UNWIND: cannot find unwind table for %lx\n",
		    frame->pc);
		return FALSE;
	}

	idx = search_index(tbl, frame->pc);

	/* seed the virtual register set from the current frame */
	ctrl.vrs[FP] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	if (CRASHDEBUG(5)) {
		fprintf(fp, "UNWIND: >frame: FP=%lx\n", ctrl.vrs[FP]);
		fprintf(fp, "UNWIND: >frame: SP=%lx\n", ctrl.vrs[SP]);
		fprintf(fp, "UNWIND: >frame: LR=%lx\n", ctrl.vrs[LR]);
		fprintf(fp, "UNWIND: >frame: PC=%lx\n", ctrl.vrs[PC]);
	}

	if (idx->insn == 1) {
		/* can't unwind */
		return FALSE;
	} else if ((idx->insn & 0x80000000) == 0) {
		/* insn contains prel31 offset to the EHT entry */

		/*
		 * Calculate a byte offset for idx->insn from the
		 * start of our copy of the index table. This offset
		 * is used to get a kernel virtual address of the
		 * unwind index entry (idx_kvaddr).
		 */
		ulong idx_offset = (ulong)&idx->insn - (ulong)tbl->start;
		ulong idx_kvaddr = tbl->kv_base + idx_offset;

		/*
		 * Now compute a kernel virtual address for the EHT
		 * entry by adding prel31 offset (idx->insn) to the
		 * unwind index entry address (idx_kvaddr) and read
		 * the EHT entry.
*/ ctrl.insn_kvaddr = prel31_to_addr(idx_kvaddr, idx->insn); if (!unwind_get_insn(&ctrl)) return FALSE; } else if ((idx->insn & 0xff000000) == 0x80000000) { /* EHT entry is encoded in the insn itself */ ctrl.insn = idx->insn; } else { error(WARNING, "UNWIND: unsupported instruction %lx\n", idx->insn); return FALSE; } /* check the personality routine */ if ((ctrl.insn & 0xff000000) == 0x80000000) { /* personality routine 0 */ ctrl.byte = 2; ctrl.entries = 1; } else if ((ctrl.insn & 0xff000000) == 0x81000000) { /* personality routine 1 */ ctrl.byte = 1; ctrl.entries = 1 + ((ctrl.insn & 0x00ff0000) >> 16); } else { error(WARNING, "UNWIND: unsupported personality routine\n"); return FALSE; } /* now, execute the instructions */ while (ctrl.entries > 0) { if (!unwind_exec_insn(&ctrl)) { error(WARNING, "UNWIND: failed to exec instruction\n"); return FALSE; } if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high) return FALSE; } if (ctrl.vrs[PC] == 0) ctrl.vrs[PC] = ctrl.vrs[LR]; if (frame->pc == ctrl.vrs[PC]) return FALSE; frame->fp = ctrl.vrs[FP]; frame->sp = ctrl.vrs[SP]; frame->lr = ctrl.vrs[LR]; frame->pc = ctrl.vrs[PC]; if (CRASHDEBUG(5)) { fprintf(fp, "UNWIND: frameptr; frame.sp = bt->stkptr; frame.pc = bt->instptr; /* * In case bt->machdep contains pointer to a full register set, we take * LR from there. */ if (bt->machdep) { const struct arm_pt_regs *regs = bt->machdep; frame.fp = regs->ARM_fp; frame.lr = regs->ARM_lr; } while (IS_KVADDR(bt->instptr)) { if (!unwind_frame(&frame, bt->stacktop)) break; arm_dump_backtrace_entry(bt, n++, frame.lr, frame.sp); bt->instptr = frame.pc; bt->stkptr = frame.sp; } } #endif /* ARM */ crash-7.1.4/netdump.h0000664000000000000000000000734012634305150013153 0ustar rootroot/* netdump.h * * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: David Anderson */ #include #define MIN_NETDUMP_ELF32_HEADER_SIZE \ sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)+sizeof(Elf32_Phdr) #define MIN_NETDUMP_ELF64_HEADER_SIZE \ sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)+sizeof(Elf64_Phdr) #define MIN_NETDUMP_ELF_HEADER_SIZE \ MAX(MIN_NETDUMP_ELF32_HEADER_SIZE, MIN_NETDUMP_ELF64_HEADER_SIZE) #define NT_TASKSTRUCT 4 #define NT_DISKDUMP 0x70000001 #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ #ifndef NT_KDUMPINFO #define NT_KDUMPINFO 7 #endif #endif /* NOTDEF */ struct pt_load_segment { off_t file_offset; physaddr_t phys_start; physaddr_t phys_end; physaddr_t zero_fill; }; struct vmcore_data { ulong flags; int ndfd; FILE *ofp; uint header_size; char *elf_header; uint num_pt_load_segments; struct pt_load_segment *pt_load_segments; Elf32_Ehdr *elf32; Elf32_Phdr *notes32; Elf32_Phdr *load32; Elf64_Ehdr *elf64; Elf64_Phdr *notes64; Elf64_Phdr *load64; void *nt_prstatus; void *nt_prpsinfo; void *nt_taskstruct; ulong task_struct; uint page_size; ulong switch_stack; uint num_prstatus_notes; void *nt_prstatus_percpu[NR_CPUS]; void *vmcoreinfo; uint size_vmcoreinfo; /* Backup Region, first 640K of System RAM. 
*/ #define KEXEC_BACKUP_SRC_END 0x0009ffff uint num_qemu_notes; void *nt_qemu_percpu[NR_CPUS]; ulonglong backup_src_start; ulong backup_src_size; ulonglong backup_offset; }; #define DUMP_ELF_INCOMPLETE 0x1 /* dumpfile is incomplete */ /* * S390 CPU timer ELF note */ #ifndef NT_S390_TIMER #define NT_S390_TIMER 0x301 #endif /* * S390 TOD clock comparator ELF note */ #ifndef NT_S390_TODCMP #define NT_S390_TODCMP 0x302 #endif /* * S390 TOD programmable register ELF note */ #ifndef NT_S390_TODPREG #define NT_S390_TODPREG 0x303 #endif /* * S390 control registers ELF note */ #ifndef NT_S390_CTRS #define NT_S390_CTRS 0x304 #endif /* * S390 prefix ELF note */ #ifndef NT_S390_PREFIX #define NT_S390_PREFIX 0x305 #endif /* * S390 vector registers 0-15 upper half note (16 * u64) */ #ifndef NT_S390_VXRS_LOW #define NT_S390_VXRS_LOW 0x309 #endif /* * S390 vector registers 16-31 note (16 * u128) */ #ifndef NT_S390_VXRS_HIGH #define NT_S390_VXRS_HIGH 0x30a #endif #define MAX_KCORE_ELF_HEADER_SIZE (32768) struct proc_kcore_data { uint flags; uint segments; char *elf_header; Elf64_Ehdr *elf64; Elf64_Phdr *load64; Elf32_Ehdr *elf32; Elf32_Phdr *load32; }; struct QEMUCPUSegment { uint32_t selector; uint32_t limit; uint32_t flags; uint32_t pad; uint64_t base; }; typedef struct QEMUCPUSegment QEMUCPUSegment; struct QEMUCPUState { uint32_t version; uint32_t size; uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp; uint64_t r8, r9, r10, r11, r12, r13, r14, r15; uint64_t rip, rflags; QEMUCPUSegment cs, ds, es, fs, gs, ss; QEMUCPUSegment ldt, tr, gdt, idt; uint64_t cr[5]; }; typedef struct QEMUCPUState QEMUCPUState; crash-7.1.4/sadump.h0000664000000000000000000001467212634305150012776 0ustar rootroot/* * sadump.h - core analysis suite * * Copyright (c) 2011 FUJITSU LIMITED * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your 
option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: HATAYAMA Daisuke */ #include #include typedef struct efi_time { uint16_t year; uint8_t month; uint8_t day; uint8_t hour; uint8_t minute; uint8_t second; uint8_t pad1; uint32_t nanosecond; #define EFI_UNSPECIFIED_TIMEZONE 2047 int16_t timezone; uint8_t daylight; uint8_t pad2; } efi_time_t; typedef struct { uint32_t data1; uint16_t data2; uint16_t data3; uint8_t data4[8]; } efi_guid_t; #define SADUMP_EFI_GUID_TEXT_REPR_LEN 36 struct sadump_part_header { #define SADUMP_SIGNATURE1 0x75646173 #define SADUMP_SIGNATURE2 0x0000706d uint32_t signature1; /* sadu */ uint32_t signature2; /* mp\0\0 */ uint32_t enable; /* set sadump service */ uint32_t reboot; /* number of seconds until reboot. 1-3600 */ uint32_t compress; /* memory image format. 
*/ uint32_t recycle; /* dump device recycle */ uint32_t label[16]; /* reserve */ efi_guid_t sadump_id; /* system UUID */ efi_guid_t disk_set_id; /* disk set UUID */ efi_guid_t vol_id; /* device UUID */ efi_time_t time_stamp; /* time stamp */ uint32_t set_disk_set; /* device type */ #define SADUMP_MAX_DISK_SET_NUM 16 uint32_t reserve; /* Padding for Alignment */ uint64_t used_device; /* used device */ #define DUMP_PART_HEADER_MAGICNUM_SIZE 982 uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]; /* magic number */ }; struct sadump_volume_info { efi_guid_t id; /* volume id */ uint64_t vol_size; /* device size */ uint32_t status; /* device status */ uint32_t cache_size; /* cache size */ }; struct sadump_disk_set_header { uint32_t disk_set_header_size; /* disk set header size */ uint32_t disk_num; /* disk number */ uint64_t disk_set_size; /* disk set size */ #define DUMP_DEVICE_MAX 16 struct sadump_volume_info vol_info[DUMP_DEVICE_MAX - 1]; /* struct VOL_INFO array */ }; struct sadump_header { #define SADUMP_SIGNATURE "sadump\0\0" char signature[8]; /* = "sadump\0\0" */ uint32_t header_version; /* Dump header version */ uint32_t reserve; /* Padding for Alignment */ efi_time_t timestamp; /* Time stamp */ uint32_t status; /* Above flags */ uint32_t compress; /* Above flags */ uint32_t block_size; /* Size of a block in byte */ #define SADUMP_DEFAULT_BLOCK_SIZE 4096 uint32_t extra_hdr_size; /* Size of host dependent * header in blocks (reserve) */ uint32_t sub_hdr_size; /* Size of arch dependent header in blocks */ uint32_t bitmap_blocks; /* Size of Memory bitmap in block */ uint32_t dumpable_bitmap_blocks; /* Size of Memory bitmap in block */ uint32_t max_mapnr; /* = max_mapnr */ uint32_t total_ram_blocks; /* Size of Memory in block */ uint32_t device_blocks; /* Number of total blocks in the dump device */ uint32_t written_blocks; /* Number of written blocks */ uint32_t current_cpu; /* CPU# which handles dump */ uint32_t nr_cpus; /* Number of CPUs */ /* * The members from 
below are supported in header version 1 * and later. */ uint64_t max_mapnr_64; uint64_t total_ram_blocks_64; uint64_t device_blocks_64; uint64_t written_blocks_64; }; struct sadump_apic_state { uint64_t ApicId; /* Local Apic ID register */ uint64_t Ldr; /* Logical Destination Register */ }; struct sadump_smram_cpu_state { uint64_t Reserved1[58]; uint32_t GdtUpper, LdtUpper, IdtUpper; uint32_t Reserved2[3]; uint64_t IoEip; uint64_t Reserved3[10]; uint32_t Cr4; uint32_t Reserved4[18]; uint32_t GdtLower; uint32_t GdtLimit; uint32_t IdtLower; uint32_t IdtLimit; uint32_t LdtLower; uint32_t LdtLimit; uint32_t LdtInfo; uint64_t Reserved5[6]; uint64_t Eptp; uint32_t EptpSetting; uint32_t Reserved6[5]; uint32_t Smbase; uint32_t SmmRevisionId; uint16_t IoInstructionRestart; uint16_t AutoHaltRestart; uint32_t Reserved7[6]; uint32_t R15Lower, R15Upper, R14Lower, R14Upper; uint32_t R13Lower, R13Upper, R12Lower, R12Upper; uint32_t R11Lower, R11Upper, R10Lower, R10Upper; uint32_t R9Lower, R9Upper, R8Lower, R8Upper; uint32_t RaxLower, RaxUpper, RcxLower, RcxUpper; uint32_t RdxLower, RdxUpper, RbxLower, RbxUpper; uint32_t RspLower, RspUpper, RbpLower, RbpUpper; uint32_t RsiLower, RsiUpper, RdiLower, RdiUpper; uint32_t IoMemAddrLower, IoMemAddrUpper; uint32_t IoMisc, Es, Cs, Ss, Ds, Fs, Gs; uint32_t Ldtr, Tr; uint64_t Dr7, Dr6, Rip, Ia32Efer, Rflags; uint64_t Cr3, Cr0; }; struct sadump_page_header { uint64_t page_flags; uint32_t size; uint32_t flags; }; struct sadump_media_header { efi_guid_t sadump_id; // system UUID efi_guid_t disk_set_id; // disk set UUID efi_time_t time_stamp; /* time stamp */ char sequential_num; // Medium sequential number char term_cord; // Termination cord char disk_set_header_size; // Size of original disk set header char disks_in_use; // Number of used disks of original dump device char reserve[4044]; // reserve feild }; #define divideup(x, y) (((x) + ((y) - 1)) / (y)) #define SADUMP_PF_SECTION_NUM 4096 struct sadump_diskset_data { char *filename; int dfd; 
struct sadump_part_header *header; ulong data_offset; }; struct sadump_data { char *filename; ulong flags; int dfd; /* dumpfile file descriptor */ int machine_type; /* machine type identifier */ struct sadump_part_header *header; struct sadump_header *dump_header; struct sadump_disk_set_header *diskset_header; struct sadump_media_header *media_header; char *bitmap; char *dumpable_bitmap; size_t sub_hdr_offset; uint32_t smram_cpu_state_size; ulong data_offset; int block_size; int block_shift; char *page_buf; uint64_t *block_table; int sd_list_len; struct sadump_diskset_data **sd_list; /* Backup Region, First 640K of System RAM. */ #define KEXEC_BACKUP_SRC_END 0x0009ffff ulonglong backup_src_start; ulong backup_src_size; ulonglong backup_offset; uint64_t max_mapnr; }; struct sadump_data *sadump_get_sadump_data(void); int sadump_cleanup_sadump_data(void); ulong sadump_identify_format(int *block_size); int sadump_get_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *smram); crash-7.1.4/xen_dom0.h0000664000000000000000000000411112634305150013201 0ustar rootroot/* xen_dom0.h * * Copyright (C) 2015 David Anderson * Copyright (C) 2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: David Anderson */ /* * ELF note types for Xen dom0/hypervisor kdumps. * The comments below are from xen/include/public/elfnote.h. */ /* * System information exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO * note in case of a system crash. 
This note will contain various * information about the system, see xen/include/xen/elfcore.h. */ #define XEN_ELFNOTE_CRASH_INFO 0x1000001 /* * System registers exported through crash notes. * * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS * note per cpu in case of a system crash. This note is architecture * specific and will contain registers not saved in the "CORE" note. * See xen/include/xen/elfcore.h for more information. */ #define XEN_ELFNOTE_CRASH_REGS 0x1000002 /* * For (temporary) backwards compatibility. */ #define NT_XEN_KDUMP_CR3 0x10000001 struct xen_kdump_data { ulong flags; ulong cr3; ulong p2m_mfn; char *page; ulong last_mfn_read; ulong last_pmd_read; ulong cache_hits; ulong accesses; int p2m_frames; ulong *p2m_mfn_frame_list; ulong xen_phys_start; int xen_major_version; int xen_minor_version; }; #define KDUMP_P2M_INIT (0x1) #define KDUMP_CR3 (0x2) #define KDUMP_MFN_LIST (0x4) #define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL)) void dump_xen_kdump_data(FILE *); struct xen_kdump_data *get_xen_kdump_data(void); void process_xen_note(ulong, void *, size_t); physaddr_t xen_kdump_p2m(physaddr_t); crash-7.1.4/unwind_x86_32_64.c0000664000000000000000000010161612634305150014321 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #if defined(X86_64) /* * Support for genarating DWARF CFI based backtraces. 
* Borrowed heavily from the kernel's implementation of unwinding using the * DWARF CFI written by Jan Beulich */ #ifdef X86_64 #include "unwind_x86_64.h" #endif #ifdef X86 #include "unwind_x86.h" #endif #include "defs.h" #define MAX_STACK_DEPTH 8 static struct local_unwind_table { struct { unsigned long pc; unsigned long range; } core, init; void *address; unsigned long size; } *local_unwind_tables, default_unwind_table; static int gather_in_memory_unwind_tables(void); static int populate_local_tables(ulong, char *); static int unwind_tables_cnt = 0; static struct local_unwind_table *find_table(unsigned long); static void dump_local_unwind_tables(void); static const struct { unsigned offs:BITS_PER_LONG / 2; unsigned width:BITS_PER_LONG / 2; } reg_info[] = { UNW_REGISTER_INFO }; #undef PTREGS_INFO #undef EXTRA_INFO #ifndef REG_INVALID #define REG_INVALID(r) (reg_info[r].width == 0) #endif #define DW_CFA_nop 0x00 #define DW_CFA_set_loc 0x01 #define DW_CFA_advance_loc1 0x02 #define DW_CFA_advance_loc2 0x03 #define DW_CFA_advance_loc4 0x04 #define DW_CFA_offset_extended 0x05 #define DW_CFA_restore_extended 0x06 #define DW_CFA_undefined 0x07 #define DW_CFA_same_value 0x08 #define DW_CFA_register 0x09 #define DW_CFA_remember_state 0x0a #define DW_CFA_restore_state 0x0b #define DW_CFA_def_cfa 0x0c #define DW_CFA_def_cfa_register 0x0d #define DW_CFA_def_cfa_offset 0x0e #define DW_CFA_def_cfa_expression 0x0f #define DW_CFA_expression 0x10 #define DW_CFA_offset_extended_sf 0x11 #define DW_CFA_def_cfa_sf 0x12 #define DW_CFA_def_cfa_offset_sf 0x13 #define DW_CFA_val_offset 0x14 #define DW_CFA_val_offset_sf 0x15 #define DW_CFA_val_expression 0x16 #define DW_CFA_lo_user 0x1c #define DW_CFA_GNU_window_save 0x2d #define DW_CFA_GNU_args_size 0x2e #define DW_CFA_GNU_negative_offset_extended 0x2f #define DW_CFA_hi_user 0x3f #define DW_EH_PE_FORM 0x07 #define DW_EH_PE_native 0x00 #define DW_EH_PE_leb128 0x01 #define DW_EH_PE_data2 0x02 #define DW_EH_PE_data4 0x03 #define 
DW_EH_PE_data8 0x04 #define DW_EH_PE_signed 0x08 #define DW_EH_PE_ADJUST 0x70 #define DW_EH_PE_abs 0x00 #define DW_EH_PE_pcrel 0x10 #define DW_EH_PE_textrel 0x20 #define DW_EH_PE_datarel 0x30 #define DW_EH_PE_funcrel 0x40 #define DW_EH_PE_aligned 0x50 #define DW_EH_PE_indirect 0x80 #define DW_EH_PE_omit 0xff #define min(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x < _y ? _x : _y; }) #define max(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x > _y ? _x : _y; }) #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) typedef unsigned long uleb128_t; typedef signed long sleb128_t; struct unwind_item { enum item_location { Nowhere, Memory, Register, Value } where; uleb128_t value; }; struct unwind_state { uleb128_t loc, org; const u8 *cieStart, *cieEnd; uleb128_t codeAlign; sleb128_t dataAlign; struct cfa { uleb128_t reg, offs; } cfa; struct unwind_item regs[ARRAY_SIZE(reg_info)]; unsigned stackDepth:8; unsigned version:8; const u8 *label; const u8 *stack[MAX_STACK_DEPTH]; }; static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; uleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (uleb128_t)(*cur & 0x7f) << shift; if (!(*cur++ & 0x80)) break; } *pcur = cur; return value; } static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; sleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (sleb128_t)(*cur & 0x7f) << shift; if (!(*cur & 0x80)) { value |= -(*cur++ & 0x40) << shift; break; } } *pcur = cur; return value; } static unsigned long read_pointer(const u8 **pLoc, 
const void *end, signed ptrType) { unsigned long value = 0; union { const u8 *p8; const u16 *p16u; const s16 *p16s; const u32 *p32u; const s32 *p32s; const unsigned long *pul; } ptr; if (ptrType < 0 || ptrType == DW_EH_PE_omit) return 0; ptr.p8 = *pLoc; switch(ptrType & DW_EH_PE_FORM) { case DW_EH_PE_data2: if (end < (const void *)(ptr.p16u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p16s++); else value = get_unaligned(ptr.p16u++); break; case DW_EH_PE_data4: #ifdef CONFIG_64BIT if (end < (const void *)(ptr.p32u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p32s++); else value = get_unaligned(ptr.p32u++); break; case DW_EH_PE_data8: BUILD_BUG_ON(sizeof(u64) != sizeof(value)); #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; value = get_unaligned(ptr.pul++); break; case DW_EH_PE_leb128: BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); value = ptrType & DW_EH_PE_signed ? 
get_sleb128(&ptr.p8, end) : get_uleb128(&ptr.p8, end); if ((const void *)ptr.p8 > end) return 0; break; default: return 0; } switch(ptrType & DW_EH_PE_ADJUST) { case DW_EH_PE_abs: break; case DW_EH_PE_pcrel: value += (unsigned long)*pLoc; break; default: return 0; } /* TBD if ((ptrType & DW_EH_PE_indirect) && __get_user(value, (unsigned long *)value)) return 0; */ *pLoc = ptr.p8; return value; } static signed fde_pointer_type(const u32 *cie) { const u8 *ptr = (const u8 *)(cie + 2); unsigned version = *ptr; if (version != 1) return -1; /* unsupported */ if (*++ptr) { const char *aug; const u8 *end = (const u8 *)(cie + 1) + *cie; uleb128_t len; /* check if augmentation size is first (and thus present) */ if (*ptr != 'z') return -1; /* check if augmentation string is nul-terminated */ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) return -1; ++ptr; /* skip terminator */ get_uleb128(&ptr, end); /* skip code alignment */ get_sleb128(&ptr, end); /* skip data alignment */ /* skip return address column */ version <= 1 ? 
(void)++ptr : (void)get_uleb128(&ptr, end); len = get_uleb128(&ptr, end); /* augmentation length */ if (ptr + len < ptr || ptr + len > end) return -1; end = ptr + len; while (*++aug) { if (ptr >= end) return -1; switch(*aug) { case 'L': ++ptr; break; case 'P': { signed ptrType = *ptr++; if (!read_pointer(&ptr, end, ptrType) || ptr > end) return -1; } break; case 'R': return *ptr; default: return -1; } } } return DW_EH_PE_native|DW_EH_PE_abs; } static int advance_loc(unsigned long delta, struct unwind_state *state) { state->loc += delta * state->codeAlign; return delta > 0; } static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value, struct unwind_state *state) { if (reg < ARRAY_SIZE(state->regs)) { state->regs[reg].where = where; state->regs[reg].value = value; } } static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, signed ptrType, struct unwind_state *state) { union { const u8 *p8; const u16 *p16; const u32 *p32; } ptr; int result = 1; if (start != state->cieStart) { state->loc = state->org; result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); if (targetLoc == 0 && state->label == NULL) return result; } for (ptr.p8 = start; result && ptr.p8 < end; ) { switch(*ptr.p8 >> 6) { uleb128_t value; case 0: switch(*ptr.p8++) { case DW_CFA_nop: break; case DW_CFA_set_loc: if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0) result = 0; break; case DW_CFA_advance_loc1: result = ptr.p8 < end && advance_loc(*ptr.p8++, state); break; case DW_CFA_advance_loc2: result = ptr.p8 <= end + 2 && advance_loc(*ptr.p16++, state); break; case DW_CFA_advance_loc4: result = ptr.p8 <= end + 4 && advance_loc(*ptr.p32++, state); break; case DW_CFA_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_offset_extended_sf: 
value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset_sf: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_restore_extended: case DW_CFA_undefined: case DW_CFA_same_value: set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); break; case DW_CFA_register: value = get_uleb128(&ptr.p8, end); set_rule(value, Register, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_remember_state: if (ptr.p8 == state->label) { state->label = NULL; return 1; } if (state->stackDepth >= MAX_STACK_DEPTH) return 0; state->stack[state->stackDepth++] = ptr.p8; break; case DW_CFA_restore_state: if (state->stackDepth) { const uleb128_t loc = state->loc; const u8 *label = state->label; state->label = state->stack[state->stackDepth - 1]; memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); memset(state->regs, 0, sizeof(state->regs)); state->stackDepth = 0; result = processCFI(start, end, 0, ptrType, state); state->loc = loc; state->label = label; } else return 0; break; case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; break; case DW_CFA_def_cfa_register: state->cfa.reg = get_uleb128(&ptr.p8, end); break; /*todo case DW_CFA_def_cfa_expression: */ /*todo case DW_CFA_expression: */ /*todo case DW_CFA_val_expression: */ case DW_CFA_GNU_args_size: get_uleb128(&ptr.p8, end); break; case DW_CFA_GNU_negative_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, (uleb128_t)0 - get_uleb128(&ptr.p8, end), state); break; case DW_CFA_GNU_window_save: default: result = 0; break; } break; case 1: result = advance_loc(*ptr.p8++ & 0x3f, state); break; case 2: value = *ptr.p8++ & 
0x3f; set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case 3: set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); break; } if (ptr.p8 > end) result = 0; if (result && targetLoc != 0 && targetLoc < state->loc) return 1; } return result && ptr.p8 == end && (targetLoc == 0 || (/*todo While in theory this should apply, gcc in practice omits everything past the function prolog, and hence the location never reaches the end of the function. targetLoc < state->loc &&*/ state->label == NULL)); } /* Unwind to previous to frame. Returns 0 if successful, negative * number in case of an error. */ int unwind(struct unwind_frame_info *frame, int is_ehframe) { #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) const u32 *fde = NULL, *cie = NULL; const u8 *ptr = NULL, *end = NULL; unsigned long startLoc = 0, endLoc = 0, cfa; unsigned i; signed ptrType = -1; uleb128_t retAddrReg = 0; // struct unwind_table *table; void *unwind_table; struct local_unwind_table *table; struct unwind_state state; u64 reg_ptr = 0; if (UNW_PC(frame) == 0) return -EINVAL; if ((table = find_table(UNW_PC(frame)))) { // unsigned long tableSize = unwind_table_size; unsigned long tableSize = table->size; unwind_table = table->address; for (fde = unwind_table; tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { if (!*fde || (*fde & (sizeof(*fde) - 1))) break; if (is_ehframe && !fde[1]) continue; /* this is a CIE */ else if (fde[1] == 0xffffffff) continue; /* this is a CIE */ if ((fde[1] & (sizeof(*fde) - 1)) || fde[1] > (unsigned long)(fde + 1) - (unsigned long)unwind_table) continue; /* this is not a valid FDE */ if (is_ehframe) cie = fde + 1 - fde[1] / sizeof(*fde); else cie = unwind_table + fde[1]; if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) || (*cie & (sizeof(*cie) - 1)) || (cie[1] != 0xffffffff && cie[1]) || (ptrType = fde_pointer_type(cie)) < 0) { cie = NULL; /* this is not a (valid) CIE */ 
continue; } ptr = (const u8 *)(fde + 2); startLoc = read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType); endLoc = startLoc + read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType & DW_EH_PE_indirect ? ptrType : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) break; cie = NULL; } } if (cie != NULL) { memset(&state, 0, sizeof(state)); state.cieEnd = ptr; /* keep here temporarily */ ptr = (const u8 *)(cie + 2); end = (const u8 *)(cie + 1) + *cie; if ((state.version = *ptr) != 1) cie = NULL; /* unsupported version */ else if (*++ptr) { /* check if augmentation size is first (and thus present) */ if (*ptr == 'z') { /* check for ignorable (or already handled) * nul-terminated augmentation string */ while (++ptr < end && *ptr) if (strchr("LPR", *ptr) == NULL) break; } if (ptr >= end || *ptr) cie = NULL; } ++ptr; } if (cie != NULL) { /* get code aligment factor */ state.codeAlign = get_uleb128(&ptr, end); /* get data aligment factor */ state.dataAlign = get_sleb128(&ptr, end); if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) cie = NULL; else { retAddrReg = state.version <= 1 ? 
*ptr++ : get_uleb128(&ptr, end); /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') ptr += get_uleb128(&ptr, end); if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info) || REG_INVALID(retAddrReg) || reg_info[retAddrReg].width != sizeof(unsigned long)) cie = NULL; } } if (cie != NULL) { state.cieStart = ptr; ptr = state.cieEnd; state.cieEnd = end; end = (const u8 *)(fde + 1) + *fde; /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') { uleb128_t augSize = get_uleb128(&ptr, end); if ((ptr += augSize) > end) fde = NULL; } } if (cie == NULL || fde == NULL) return -ENXIO; state.org = startLoc; memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); /* process instructions */ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) || state.loc > endLoc || state.regs[retAddrReg].where == Nowhere || state.cfa.reg >= ARRAY_SIZE(reg_info) || reg_info[state.cfa.reg].width != sizeof(unsigned long) || state.cfa.offs % sizeof(unsigned long)) { return -EIO; } /* update frame */ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; startLoc = min((unsigned long)UNW_SP(frame), cfa); endLoc = max((unsigned long)UNW_SP(frame), cfa); if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { startLoc = min(STACK_LIMIT(cfa), cfa); endLoc = max(STACK_LIMIT(cfa), cfa); } #ifndef CONFIG_64BIT # define CASES CASE(8); CASE(16); CASE(32) #else # define CASES CASE(8); CASE(16); CASE(32); CASE(64) #endif for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) { if (state.regs[i].where == Nowhere) continue; return -EIO; } switch(state.regs[i].where) { default: break; case Register: if (state.regs[i].value >= ARRAY_SIZE(reg_info) || REG_INVALID(state.regs[i].value) || reg_info[i].width > reg_info[state.regs[i].value].width){ return -EIO; } switch(reg_info[state.regs[i].value].width) { #define CASE(n) \ case sizeof(u##n): \ state.regs[i].value = FRAME_REG(state.regs[i].value, \ const u##n); \ break CASES; #undef CASE default: return -EIO; } break; } } for (i = 0; 
i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) continue; switch(state.regs[i].where) { case Nowhere: if (reg_info[i].width != sizeof(UNW_SP(frame)) || &FRAME_REG(i, __typeof__(UNW_SP(frame))) != &UNW_SP(frame)) continue; UNW_SP(frame) = cfa; break; case Register: switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ FRAME_REG(i, u##n) = state.regs[i].value; \ break CASES; #undef CASE default: return -EIO; } break; case Value: if (reg_info[i].width != sizeof(unsigned long)){ return -EIO;} FRAME_REG(i, unsigned long) = cfa + state.regs[i].value * state.dataAlign; break; case Memory: { unsigned long addr = cfa + state.regs[i].value * state.dataAlign; if ((state.regs[i].value * state.dataAlign) % sizeof(unsigned long) || addr < startLoc || addr + sizeof(unsigned long) < addr || addr + sizeof(unsigned long) > endLoc){ return -EIO;} switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ readmem(addr, KVADDR, ®_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \ FRAME_REG(i, u##n) = (u##n)reg_ptr;\ break CASES; #undef CASE default: return -EIO; } } break; } } return 0; #undef CASES #undef FRAME_REG } /* * Initialize the unwind table(s) in the best-case order: * * 1. Use the in-memory kernel and module unwind tables. * 2. Use the in-memory kernel-only .eh_frame data. (possible?) * 3. Use the kernel-only .eh_frame data from the vmlinux file. 
*/ void init_unwind_table(void) { ulong unwind_table_size; void *unwind_table; kt->flags &= ~DWARF_UNWIND; if (gather_in_memory_unwind_tables()) { if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n", unwind_tables_cnt); kt->flags |= DWARF_UNWIND_MEMORY; if (unwind_tables_cnt > 1) kt->flags |= DWARF_UNWIND_MODULES; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; return; } if (symbol_exists("__start_unwind") && symbol_exists("__end_unwind")) { unwind_table_size = symbol_value("__end_unwind") - symbol_value("__start_unwind"); if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); goto try_eh_frame; } if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table, unwind_table_size, "unwind table", RETURN_ON_ERROR)) { error(WARNING, "cannot read unwind table data\n"); free(unwind_table); goto try_eh_frame; } kt->flags |= DWARF_UNWIND_MEMORY; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n"); return; } try_eh_frame: if (st->dwarf_eh_frame_size || st->dwarf_debug_frame_size) { int fd; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); unwind_table_size = is_ehframe ? st->dwarf_eh_frame_size : st->dwarf_debug_frame_size; if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); return; } if ((fd = open(pc->namelist, O_RDONLY)) < 0) { error(WARNING, "cannot open %s for %s data\n", pc->namelist, is_ehframe ? 
".eh_frame" : ".debug_frame"); free(unwind_table); return; } if (is_ehframe) lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET); else lseek(fd, st->dwarf_debug_frame_file_offset, SEEK_SET); if (read(fd, unwind_table, unwind_table_size) != unwind_table_size) { if (CRASHDEBUG(1)) error(WARNING, "cannot read %s data from %s\n", is_ehframe ? ".eh_frame" : ".debug_frame", pc->namelist); free(unwind_table); return; } close(fd); default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; kt->flags |= DWARF_UNWIND_EH_FRAME; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n"); return; } } /* * Find the appropriate kernel-only "root_table" unwind_table, * and pass it to populate_local_tables() to do the heavy lifting. */ static int gather_in_memory_unwind_tables(void) { int i, cnt, found; struct syment *sp, *root_tables[10]; char *root_table_buf; char buf[BUFSIZE]; ulong name; STRUCT_SIZE_INIT(unwind_table, "unwind_table"); MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core"); MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init"); MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address"); MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size"); MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link"); MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name"); if (INVALID_SIZE(unwind_table) || INVALID_MEMBER(unwind_table_core) || INVALID_MEMBER(unwind_table_init) || INVALID_MEMBER(unwind_table_address) || INVALID_MEMBER(unwind_table_size) || INVALID_MEMBER(unwind_table_link) || INVALID_MEMBER(unwind_table_name)) { if (CRASHDEBUG(1)) error(NOTE, "unwind_table structure has changed, or does not exist in this kernel\n"); return 0; } /* * Unfortunately there are two kernel root_table symbols. 
*/ if (!(cnt = get_syment_array("root_table", root_tables, 10))) return 0; root_table_buf = GETBUF(SIZE(unwind_table)); for (i = found = 0; i < cnt; i++) { sp = root_tables[i]; if (!readmem(sp->value, KVADDR, root_table_buf, SIZE(unwind_table), "root unwind_table", RETURN_ON_ERROR|QUIET)) goto gather_failed; name = ULONG(root_table_buf + OFFSET(unwind_table_name)); if (read_string(name, buf, strlen("kernel")+1) && STREQ("kernel", buf)) { found++; if (CRASHDEBUG(1)) fprintf(fp, "root_table name: %lx [%s]\n", name, buf); break; } } if (!found) goto gather_failed; cnt = populate_local_tables(sp->value, root_table_buf); FREEBUF(root_table_buf); return cnt; gather_failed: FREEBUF(root_table_buf); return 0; } /* * Transfer the relevant data from the kernel and module unwind_table * structures to the local_unwind_table structures. */ static int populate_local_tables(ulong root, char *buf) { struct list_data list_data, *ld; int i, cnt; ulong *table_list; ulong vaddr; struct local_unwind_table *tp; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = root; ld->member_offset = OFFSET(unwind_table_link); ld->flags = RETURN_ON_LIST_ERROR; if (CRASHDEBUG(1)) ld->flags |= VERBOSE; hq_open(); cnt = do_list(ld); if (cnt == -1) { error(WARNING, "UNWIND: failed to gather unwind_table list"); return 0; } table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(table_list, cnt); hq_close(); if (!(local_unwind_tables = malloc(sizeof(struct local_unwind_table) * cnt))) { error(WARNING, "cannot malloc unwind_table space (%d tables)\n", cnt); FREEBUF(table_list); return 0; } for (i = 0; i < cnt; i++, tp++) { if (!readmem(table_list[i], KVADDR, buf, SIZE(unwind_table), "unwind_table", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table\n"); goto failed; } tp = &local_unwind_tables[i]; /* * Copy the required table info for find_table(). 
*/ BCOPY(buf + OFFSET(unwind_table_core), (char *)&tp->core.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_init), (char *)&tp->init.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_size), (char *)&tp->size, sizeof(ulong)); /* * Then read the DWARF CFI data. */ vaddr = ULONG(buf + OFFSET(unwind_table_address)); if (!(tp->address = malloc(tp->size))) { error(WARNING, "cannot malloc unwind_table space\n"); goto failed; break; } if (!readmem(vaddr, KVADDR, tp->address, tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table data\n"); goto failed; } } unwind_tables_cnt = cnt; if (CRASHDEBUG(7)) dump_local_unwind_tables(); failed: FREEBUF(table_list); return unwind_tables_cnt; } /* * Find the unwind_table containing a pc. */ static struct local_unwind_table * find_table(unsigned long pc) { int i; struct local_unwind_table *tp, *table; table = &default_unwind_table; for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; if ((pc >= tp->core.pc && pc < tp->core.pc + tp->core.range) || (pc >= tp->init.pc && pc < tp->init.pc + tp->init.range)) { table = tp; break; } } return table; } static void dump_local_unwind_tables(void) { int i, others; struct local_unwind_table *tp; others = 0; fprintf(fp, "DWARF flags: ("); if (kt->flags & DWARF_UNWIND) fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & NO_DWARF_UNWIND) fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MEMORY) fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_EH_FRAME) fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MODULES) fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? 
"|" : ""); fprintf(fp, ")\n\n"); fprintf(fp, "default_unwind_table:\n"); fprintf(fp, " address: %lx\n", (ulong)default_unwind_table.address); fprintf(fp, " size: %ld\n\n", (ulong)default_unwind_table.size); fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt); for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; fprintf(fp, "[%d]\n", i); fprintf(fp, " core: pc: %lx\n", tp->core.pc); fprintf(fp, " range: %ld\n", tp->core.range); fprintf(fp, " init: pc: %lx\n", tp->init.pc); fprintf(fp, " range: %ld\n", tp->init.range); fprintf(fp, " address: %lx\n", (ulong)tp->address); fprintf(fp, " size: %ld\n", tp->size); } } int dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop) { unsigned long bp, offset; struct syment *sp; char *name; struct unwind_frame_info *frame; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); // frame->regs.rsp = bt->stkptr; // frame->regs.rip = bt->instptr; UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; /* read rbp from stack for non active tasks */ if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) { // readmem(frame->regs.rsp, KVADDR, &bp, readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ } sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. 
Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); while ((UNW_SP(frame) < stacktop) && !unwind(frame, is_ehframe) && UNW_PC(frame)) { /* To prevent rip pushed on IRQ stack being reported both * both on the IRQ and process stacks */ if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16)) break; level++; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); break; } /* * If offset is zero, it means we have crossed over to the next * function. Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? " " : "", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); } bailout: FREEBUF(frame); return ++level; } int dwarf_print_stack_entry(struct bt_info *bt, int level) { unsigned long offset; struct syment *sp; char *name; struct unwind_frame_info *frame; frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. 
Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); bailout: FREEBUF(frame); return level; } void dwarf_debug(struct bt_info *bt) { struct unwind_frame_info *frame; ulong bp; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); if (!bt->hp->eip) { dump_local_unwind_tables(); return; } if (!(kt->flags & DWARF_UNWIND_CAPABLE)) { error(INFO, "not DWARF capable\n"); return; } frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); /* * XXX: This only works for the first PC/SP pair seen in a normal * backtrace, so it's not particularly helpful. Ideally it should * be capable to take any PC/SP pair in a stack, but it appears to * related to the rbp value. */ UNW_PC(frame) = bt->hp->eip; UNW_SP(frame) = bt->hp->esp; readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ unwind(frame, is_ehframe); fprintf(fp, "frame size: %lx (%lx)\n", (ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp); FREEBUF(frame); } #endif crash-7.1.4/va_server.c0000775000000000000000000002414212634305150013470 0ustar rootroot/* va_server.c - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2011, 2013 David Anderson * Copyright (C) 2002-2006, 2011, 2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 10/99, Dave Winchell, Initial release for kernel crash dump support. * 11/12/99, Dave Winchell, Add support for in memory dumps. */ #include #include #include #include #include #include #include #include #include "va_server.h" #include #include #include struct map_hdr *vas_map_base = (struct map_hdr *)0; /* base of tree */ #ifdef NOT_DEF #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(page_size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(page_size - 1))) #endif u_long vas_base_va; u_long vas_start_va; FILE *vas_file_p; char *zero_page; int vas_version; int read_map(char *crash_file); void load_data(struct crash_map_entry *m); int find_data(u_long va, u_long *buf, u_long *len, u_long *offset); u_long vas_find_end(void); int vas_free_memory(char *); int vas_memory_used(void); int vas_memory_dump(FILE *); int mclx_page_size(void); void set_vas_debug(ulong); extern int monitor_memory(long *, long *, long *, long *); int Page_Size; ulong vas_debug = 0; extern void *malloc(size_t); int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride) { Page_Size = getpagesize(); /* temporary setting until disk header is read */ if(read_map(crash_file)) { if(va_server_init_v1(crash_file, start, end, stride)) return -1; vas_version = 1; return 0; } vas_version = 2; zero_page = (char *)malloc(Page_Size); bzero((void *)zero_page, Page_Size); vas_base_va = vas_start_va = vas_map_base->map[0].start_va; if(start) *start = vas_start_va; if(end) { *end = vas_find_end(); } if(stride) *stride = Page_Size; return 0; } int vas_lseek(u_long position, int whence) { if(vas_version < 2) return vas_lseek_v1(position, whence); if(whence != SEEK_SET) return -1; vas_base_va = 
vas_start_va + position; return 0; } size_t vas_read(void *buf_in, size_t count) { u_long len, offset, buf, va; u_long num, output, remaining; if(vas_version < 2) return vas_read_v1(buf_in, count); va = vas_base_va; remaining = count; output = (u_long)buf_in; while(remaining) { find_data(va, &buf, &len, &offset); num = (remaining > (len - offset)) ? (len - offset) : remaining; bcopy((const void *)(buf+offset), (void *)output, num); remaining -= num; va += num; output += num; } vas_base_va += count; return count; } size_t vas_write(void *buf_in, size_t count) { u_long len, offset, buf, va; if(vas_version < 2) return vas_write_v1(buf_in, count); if(count != sizeof(u_long)) { printf("count %d not %d\n", (int)count, (int)sizeof(u_long)); return -1; } va = vas_base_va; if(!find_data(va, &buf, &len, &offset)) *(u_long *)(buf+offset) = *(u_long *)buf_in; vas_base_va += count; return count; } void vas_free_data(u_long va) { struct crash_map_entry *m, *last_m; if(vas_version < 2) { vas_free_data_v1(va); return; } m = last_m = vas_map_base->map; for(;m->start_va;) { if(m->start_va > va) break; last_m = m; m++; } if(last_m->exp_data) { free((void *)last_m->exp_data); last_m->exp_data = 0; } } u_long vas_find_end(void) { struct crash_map_entry *m; u_long *sub_m; m = vas_map_base->map; for(;m->start_va;m++) ; m--; load_data(m); sub_m = (u_long *)m->exp_data; for(;*sub_m; sub_m++) ; sub_m--; return *sub_m; } int find_data(u_long va, u_long *buf, u_long *len, u_long *offset) { u_long off; struct crash_map_entry *m, *last_m; u_long *sub_m, va_saved; char *data; int saved; m = last_m = vas_map_base->map; for(;m->start_va;) { if(m->start_va > va) break; last_m = m; m++; } load_data(last_m); sub_m = (u_long *)last_m->exp_data; data = last_m->exp_data + CRASH_SUB_MAP_PAGES*Page_Size; saved = 0; for(;*sub_m; sub_m++, data += Page_Size) { va_saved = *sub_m; if((va >= va_saved) && (va < (va_saved + Page_Size))) { saved = 1; break; } else if(va < va_saved) break; } off = va - 
(u_long)trunc_page(va); if(offset) *offset = off; if(len) *len = Page_Size; if (vas_debug && !saved) fprintf(stderr, "find_data: page containing %lx not saved\n", (u_long)trunc_page(va)); if(buf) *buf = saved ? (u_long)data : (u_long)zero_page; return (saved ^ 1); } void load_data(struct crash_map_entry *m) { char *compr_buf; char *exp_buf; int ret, items; uLongf destLen; int retries; if(m->exp_data) goto out; ret = fseek(vas_file_p, (long)(m->start_blk * Page_Size), SEEK_SET); if(ret == -1) { printf("load_data: unable to fseek, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } retries = 0; load_data_retry1: compr_buf = (char *)malloc(m->num_blks * Page_Size); if(!compr_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry1; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } items = fread((void *)compr_buf, sizeof(char), m->num_blks * Page_Size, vas_file_p); if(items != m->num_blks * Page_Size) { printf("unable to read blocks from errno = %d\n", ferror(vas_file_p)); clean_exit(1); } load_data_retry2: m->exp_data = exp_buf = (char *)malloc((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); if(!exp_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry2; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } destLen = (uLongf)((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); ret = uncompress((Bytef *)exp_buf, &destLen, (const Bytef *)compr_buf, (uLong)items); if(ret) { if(ret == Z_MEM_ERROR) printf("load_data, bad ret Z_MEM_ERROR from uncompress\n"); else if(ret == Z_BUF_ERROR) printf("load_data, bad ret Z_BUF_ERROR from uncompress\n"); else if(ret == Z_DATA_ERROR) printf("load_data, bad ret Z_DATA_ERROR from uncompress\n"); else printf("load_data, bad ret %d from uncompress\n", ret); clean_exit(1); } free((void *)compr_buf); out: return; } int read_map(char *crash_file) { struct crash_map_hdr *disk_hdr; 
int ret, items; struct map_hdr *hdr; vas_file_p = fopen(crash_file, "r"); if(vas_file_p == (FILE *)0) { printf("read_maps: bad ret from fopen for %s: %s\n", crash_file, strerror(errno)); return -1; } hdr = (struct map_hdr *)malloc(sizeof(struct map_hdr)); if(!hdr) { printf("read_map: unable to malloc mem\n"); return -1; } bzero((void *)hdr, sizeof(struct map_hdr)); disk_hdr = (struct crash_map_hdr *)malloc(Page_Size); ret = fseek(vas_file_p, (long)0, SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(disk_hdr); return -1; } items = fread((void *)disk_hdr, 1, Page_Size, vas_file_p); if(items != Page_Size) { return -1; } if(disk_hdr->magic[0] != CRASH_MAGIC) { return -1; } ret = fseek(vas_file_p, (long)((disk_hdr->map_block) * disk_hdr->blk_size), SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); return -1; } Page_Size = disk_hdr->blk_size; /* over-ride PAGE_SIZE */ hdr->blk_size = disk_hdr->blk_size; hdr->map = (struct crash_map_entry *)malloc(disk_hdr->map_blocks * disk_hdr->blk_size); items = fread((void *)hdr->map, hdr->blk_size, disk_hdr->map_blocks, vas_file_p); if(items != disk_hdr->map_blocks) { printf("unable to read map entries, err = %d\n", errno); return -1; } vas_map_base = hdr; return 0; } int vas_free_memory(char *s) { struct crash_map_entry *m; long swap_usage; int blks; if (vas_version < 2) return 0; if (s) { fprintf(stderr, "\nWARNING: %s ", s); if (monitor_memory(NULL, NULL, NULL, &swap_usage)) fprintf(stderr, "(swap space usage: %ld%%)", swap_usage); fprintf(stderr, "\nWARNING: memory/swap exhaustion may cause this session to be killed\n"); } for (blks = 0, m = vas_map_base->map; m->start_va; m++) { if (m->exp_data) { free((void *)m->exp_data); m->exp_data = 0; blks += m->num_blks; } } return blks; } int vas_memory_used(void) { struct crash_map_entry *m; int blks; if (vas_version < 2) return 0; for (blks = 0, m = vas_map_base->map; m->start_va; m++) { 
if (m->exp_data) blks += m->num_blks; } return blks; } char *memory_dump_hdr_32 = "START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_32 = "%8lx %8lx %9d %8d\n"; char *memory_dump_hdr_64 = \ " START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_64 = "%16lx %16lx %9d %8d\n"; int vas_memory_dump(FILE *fp) { struct crash_map_entry *m; char *hdr, *fmt; int blks; if (vas_version < 2) { fprintf(fp, "%s\n", vas_version ? "version 1: not supported" : "no dumpfile"); return 0; } hdr = sizeof(long) == 4 ? memory_dump_hdr_32 : memory_dump_hdr_64; fmt = sizeof(long) == 4 ? memory_dump_fmt_32 : memory_dump_fmt_64; fprintf(fp, "%s", hdr); for (blks = 0, m = vas_map_base->map; m->start_va; m++) { fprintf(fp, fmt, m->start_va, m->exp_data, m->start_blk, m->num_blks); if (m->exp_data) blks += m->num_blks; } fprintf(fp, "total blocks: %d\n", blks); return blks; } int mclx_page_size(void) { return (Page_Size); } void set_vas_debug(ulong value) { vas_debug = value; } crash-7.1.4/lkcd_v5.c0000775000000000000000000003103212634305150013017 0ustar rootroot/* lkcd_v5.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 */
#define LKCD_COMMON
#include "defs.h"
#include "lkcd_dump_v5.h"

/* file-scope copies of the v5 dump header and current page header */
static dump_header_t dump_header_v5 = { 0 };
static dump_page_t dump_page = { 0 };
static void mclx_cache_page_headers_v5(void);

/*
 *  Verify and initialize the LKCD environment, storing the common data
 *  in the global lkcd_environment structure.
 *
 *  Reads the dump header from fd, derives page geometry and benchmark
 *  data, scans the page headers (fully only under LKCD_DEBUG(2)), and
 *  allocates the page cache and hash.  Returns TRUE on success.
 */
int
lkcd_dump_init_v5(FILE *fp, int fd)
{
	int i;
	int eof;
	uint32_t pgcnt;
	dump_header_t *dh;
	dump_page_t *dp;

	lkcd->fd = fd;
	lkcd->fp = fp;

	lseek(lkcd->fd, 0, SEEK_SET);

	dh = &dump_header_v5;
	dp = &dump_page;

	if (read(lkcd->fd, dh, sizeof(dump_header_t)) !=
	    sizeof(dump_header_t))
		return FALSE;

	lkcd->dump_page = dp;
	lkcd->dump_header = dh;
	if (lkcd->debug)
		dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY);

	/*
	 *  Allocate and clear the benchmark offsets, one per megabyte.
	 */
	lkcd->page_size = dh->dh_page_size;
	lkcd->page_shift = ffs(lkcd->page_size) - 1;
	lkcd->bits = sizeof(long) * 8;
	lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1;
	lkcd->total_pages = dh->dh_num_pages;

	lkcd->zone_shift = ffs(ZONE_SIZE) - 1;
	lkcd->zone_mask = ~(ZONE_SIZE - 1);
	lkcd->num_zones = 0;
	lkcd->max_zones = 0;
	lkcd->zoned_offsets = 0;

	lkcd->get_dp_flags = get_dp_flags_v5;
	lkcd->get_dp_address = get_dp_address_v5;
	lkcd->compression = dh->dh_dump_compress;
	lkcd->page_header_size = sizeof(dump_page_t);
	lkcd->get_dp_size = get_dp_size_v5;

	lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET);

	/*
	 *  Walk the page headers; without LKCD_DEBUG(2) only the first
	 *  page is examined (enough to detect MCLX variants).
	 */
	for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) {

		switch (lkcd_load_dump_page_header(dp, pgcnt))
		{
		case LKCD_DUMPFILE_OK:
		case LKCD_DUMPFILE_END:
			break;

		case LKCD_DUMPFILE_EOF:
			lkcd_print("reached EOF\n");
			eof = TRUE;
			continue;
		}

		if (dp->dp_flags &
		    ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|LKCD_DUMP_MCLX_V0)) {
			lkcd_print("unknown page flag in dump: %lx\n",
				dp->dp_flags);
		}
		if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1))
			lkcd->flags |= LKCD_MCLX;

		if (dp->dp_size > 4096) {
			lkcd_print("dp_size > 4096: %d\n", dp->dp_size);
			dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY);
		}

		if (dp->dp_flags & DUMP_DH_END) {
			lkcd_print("found DUMP_DH_END\n");
			break;
		}

		lseek(lkcd->fd, dp->dp_size, SEEK_CUR);

		if (!LKCD_DEBUG(2))
			break;
	}

	/*
	 *  Allocate space for LKCD_CACHED_PAGES data pages plus one to
	 *  contain a copy of the compressed data of the current page.
	 */
	if ((lkcd->page_cache_buf = (char *)malloc
	    (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL)
		return FALSE;

	/*
	 *  Clear the page data areas.
	 */
	lkcd_free_memory();

	for (i = 0; i < LKCD_CACHED_PAGES; i++) {
		lkcd->page_cache_hdr[i].pg_bufptr =
			&lkcd->page_cache_buf[i * dh->dh_page_size];
	}

	if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL)
		return FALSE;

	if ((lkcd->page_hash = (struct page_hash_entry *)calloc
	    (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL)
		return FALSE;

	lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ?
		pgcnt : dh->dh_num_pages;
	lkcd->panic_task = (ulong)dh->dh_current_task;
	lkcd->panic_string = (char *)&dh->dh_panic_string[0];

	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		mclx_cache_page_headers_v5();

	if (!fp)
		lkcd->flags |= LKCD_REMOTE;
	lkcd->flags |= LKCD_VALID;

	return TRUE;
}

/*
 *  Return the current page's dp_size.
 */
uint32_t
get_dp_size_v5(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_size);
}

/*
 *  Return the current page's dp_flags.
 */
uint32_t
get_dp_flags_v5(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_flags);
}

/*
 *  Return the current page's dp_address.
 */
uint64_t
get_dp_address_v5(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_address);
}

/*
 *  help -S output, or as specified by arg.
 *
 *  Pretty-prints the dump header and/or the current page header;
 *  BITS32() selects the format strings appropriate to the host's
 *  field widths.  The two labels below implement a deliberate
 *  fall-through: with no arg both sections print.
 */
void
dump_lkcd_environment_v5(ulong arg)
{
	int others;
	dump_header_t *dh;
	dump_page_t *dp;

	dh = (dump_header_t *)lkcd->dump_header;
	dp = (dump_page_t *)lkcd->dump_page;

	if (arg == LKCD_DUMP_HEADER_ONLY)
		goto dump_header_only;
	if (arg == LKCD_DUMP_PAGE_ONLY)
		goto dump_page_only;

dump_header_only:

	lkcd_print(" dump_header:\n");
	lkcd_print(" dh_magic_number: ");
	lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number);
	if (dh->dh_magic_number == DUMP_MAGIC_NUMBER)
		lkcd_print("(DUMP_MAGIC_NUMBER)\n");
	else if (dh->dh_magic_number == DUMP_MAGIC_LIVE)
		lkcd_print("(DUMP_MAGIC_LIVE)\n");
	else
		lkcd_print("(?)\n");
	others = 0;
	lkcd_print(" dh_version: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version);
	switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK)
	{
	case LKCD_DUMP_V1:
		lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V2:
		lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V3:
		lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V5:
		lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : "");
		break;
	}
	if (dh->dh_version & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_header_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size);
	lkcd_print(" dh_dump_level: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level);
	others = 0;
	if (dh->dh_dump_level & DUMP_LEVEL_HEADER)
		lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_KERN)
		lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_USED)
		lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_ALL)
		lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_page_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size);
	lkcd_print(" dh_memory_size: ");
	lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size);
	lkcd_print(" dh_memory_start: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start);
	lkcd_print(" dh_memory_end: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end);
	lkcd_print(" dh_num_pages: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages);
	lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string,
		dh && dh->dh_panic_string &&
		strstr(dh->dh_panic_string, "\n") ? "" : "\n");
	lkcd_print(" dh_time: %s\n",
		strip_linefeeds(ctime(&(dh->dh_time.tv_sec))));
	lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname);
	lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename);
	lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release);
	lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version);
	lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine);
	lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname);
	lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task);
	lkcd_print(" dh_dump_compress: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress);
	others = 0;
	if (dh->dh_dump_compress == DUMP_COMPRESS_NONE)
		lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_RLE)
		lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP)
		lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_flags: ");
	others = 0;
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags);
	if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT)
		lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_device: ");
	lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device);

	if (arg == LKCD_DUMP_HEADER_ONLY)
		return;

dump_page_only:

	lkcd_print(" dump_page:\n");
	lkcd_print(" dp_address: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address);
	lkcd_print(" dp_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size);
	lkcd_print(" dp_flags: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags);
	others = 0;
	/* NOTE: first flag passes others++ as an (ignored) extra printf arg
	 * purely for the side-effect increment; siblings use %s + "|" */
	if (dp->dp_flags & DUMP_DH_COMPRESSED)
		lkcd_print("DUMP_DH_COMPRESSED", others++);
	if (dp->dp_flags & DUMP_DH_RAW)
		lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (dp->dp_flags & DUMP_DH_END)
		lkcd_print("%sDUMP_DH_END", others++ ? "|" : "");
	if (dp->dp_flags & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	lkcd_print(")\n");
}

/*
 *  Console-debug dump of one dump_page header, prefixed by caller tag s.
 */
void
dump_dump_page_v5(char *s, void *dpp)
{
	dump_page_t *dp;
	uint32_t flags;
	int others;

	console(s);

	dp = (dump_page_t *)dpp;
	others = 0;

	console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ",
		dp->dp_address);
	console("dp_size: %ld ", dp->dp_size);
	console("dp_flags: %lx (", flags = dp->dp_flags);

	if (flags & DUMP_DH_COMPRESSED)
		console("DUMP_DH_COMPRESSED", others++);
	if (flags & DUMP_DH_RAW)
		console("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (flags & DUMP_DH_END)
		console("%sDUMP_DH_END", others++ ? "|" : "");
	if (flags & LKCD_DUMP_MCLX_V0)
		console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	console(")\n");
}

/*
 *  Read the MCLX-enhanced page header cache. Verify the first one, which
 *  is a pointer to the page header for address 1MB, and take the rest at
 *  blind faith. Note that the page headers do not include the 64K dump
 *  header offset, which must be added to the values found.
*/ static void mclx_cache_page_headers_v5(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. */ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-7.1.4/lkcd_v7.c0000664000000000000000000003516712634305150013033 0ustar rootroot/* lkcd_v7.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002 Silicon Graphics, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
*
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define LKCD_COMMON
#include "defs.h"
#include "lkcd_dump_v5.h" /* REMIND */

static dump_header_t dump_header_v7 = { 0 };
static dump_page_t dump_page = { 0 };
static void mclx_cache_page_headers_v7(void);

/*
 * Verify and initialize the LKCD environment, storing the common data
 * in the global lkcd_environment structure.
 *
 *   fp:       crash's output FILE; NULL indicates a remote dumpfile
 *             (LKCD_REMOTE is set below in that case).
 *   fd:       open file descriptor for the dumpfile.
 *   dumpfile: pathname of the dumpfile, used to derive the optional
 *             ".index" sidecar name when LKCD_INDEX_FILE is built in.
 *
 * Returns TRUE on success (LKCD_VALID set), FALSE on any read or
 * allocation failure.
 */
int
lkcd_dump_init_v7(FILE *fp, int fd, char *dumpfile)
{
	int i;
	int eof;
	uint32_t pgcnt;
	dump_header_t *dh;
	dump_page_t *dp;
	int dump_index_size ATTRIBUTE_UNUSED;
	int dump_index_created ATTRIBUTE_UNUSED;
	static char dumpfile_index_name[128];
	int ifd ATTRIBUTE_UNUSED;

	lkcd->fd = fd;
	lkcd->fp = fp;

	dump_index_created = 0;
	/* Header lives at the very start of the dumpfile. */
	lseek(lkcd->fd, 0, SEEK_SET);

	dh = &dump_header_v7;
	dp = &dump_page;

	if (read(lkcd->fd, dh, sizeof(dump_header_t)) !=
	    sizeof(dump_header_t))
		return FALSE;

	lkcd->dump_page = dp;
	lkcd->dump_header = dh;
	if (lkcd->debug)
		dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY);

#ifdef IA64
	if ( (fix_addr_v7(fd) == -1) )
		return FALSE;
#endif

	/*
	 * Allocate and clear the benchmark offsets, one per megabyte.
	 */
	lkcd->page_size = dh->dh_page_size;
	lkcd->page_shift = ffs(lkcd->page_size) - 1;
	lkcd->bits = sizeof(long) * 8;
	lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1;
	lkcd->total_pages = dh->dh_num_pages;
	/*
	 * REMIND: dh_memory_size should be in physical pages and seems to be wrong.
	 * pad by two for now; 3DFE8 should be 40000.
	 */
	lkcd->memory_pages = dh->dh_memory_size;

	lkcd->page_offsets = 0;
	lkcd->ifd = -1;
	lkcd->dumpfile_index = NULL;

	/* Keep from getting unused warnings */
	dump_index_size = 0;
	dump_index_created = 0;
	strcpy(dumpfile_index_name, dumpfile);
	ifd = 0;

#ifdef LKCD_INDEX_FILE
	if (dh->dh_memory_end < 0x1000000000LL) {
		lkcd->memory_pages = dh->dh_memory_end / lkcd->page_size + 1;
	} else {
		lkcd->memory_pages = (dh->dh_memory_size *
			(getpagesize()/lkcd->page_size)) * 2;
	}
	dump_index_size = (lkcd->memory_pages * sizeof(off_t));
	lkcd->page_offsets = 0;
	strcpy(dumpfile_index_name, dumpfile);
	lkcd->dumpfile_index = strcat(dumpfile_index_name, ".index");
	/* Try an existing index file first; create one on ENOENT. */
	ifd = open(lkcd->dumpfile_index, O_RDWR, 0644);
	if( ifd < 0 ) {
		int err;
		/*
		 * NOTE(review): the success test below is "ifd > 0", which
		 * would treat a legitimate descriptor 0 as failure — confirm
		 * fd 0 can never be returned here.
		 */
		ifd = open(lkcd->dumpfile_index, (O_RDWR | O_CREAT), 0644);
		if (ifd > 0) {
			err = ftruncate(ifd, dump_index_size);
			if (err == -1) {
				lkcd->dumpfile_index = NULL;
				close(ifd);
				ifd = -1;
			} else {
				dump_index_created++;
			}
		}
	}
	if (ifd >= 0) {
		/* MAP_SHARED so we can sync the file */
		lkcd->page_offsets = mmap( (void *)0, dump_index_size,
			(PROT_READ | PROT_WRITE), MAP_SHARED, ifd, (off_t)0);
		if (lkcd->page_offsets == MAP_FAILED) {
			close(ifd);
			ifd = -1;
			lkcd->dumpfile_index = NULL;
			lkcd->page_offsets = 0;
		}
	}
	lkcd->ifd = ifd;
#endif

	lkcd->zone_shift = ffs(ZONE_SIZE) - 1;
	lkcd->zone_mask = ~(ZONE_SIZE - 1);
	lkcd->num_zones = 0;
	lkcd->max_zones = 0;
	lkcd->zoned_offsets = 0;

	/* v7-specific accessors for the generic LKCD layer. */
	lkcd->get_dp_flags = get_dp_flags_v7;
	lkcd->get_dp_address = get_dp_address_v7;
	lkcd->get_dp_size = get_dp_size_v7;
	lkcd->compression = dh->dh_dump_compress;
	lkcd->page_header_size = sizeof(dump_page_t);

	lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET);

	/*
	 * Read all of the pages and save the page offsets for lkcd_lseek().
	 */
	for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) {
		switch (lkcd_load_dump_page_header(dp, pgcnt))
		{
		case LKCD_DUMPFILE_OK:
		case LKCD_DUMPFILE_END:
			break;

		case LKCD_DUMPFILE_EOF:
			lkcd_print("reached EOF\n");
			eof = TRUE;
			continue;
		}

		if (dp->dp_flags &
		    ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|
		    LKCD_DUMP_MCLX_V0)) {
			lkcd_print("unknown page flag in dump: %lx\n",
				dp->dp_flags);
		}
		if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1))
			lkcd->flags |= LKCD_MCLX;

		if (dp->dp_size > 4096) {
			lkcd_print("dp_size > 4096: %d\n", dp->dp_size);
			dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY);
		}

		if (dp->dp_flags & DUMP_DH_END) {
			lkcd_print("found DUMP_DH_END\n");
			break;
		}

		/* Skip the page data to reach the next page header. */
		lseek(lkcd->fd, dp->dp_size, SEEK_CUR);

		/* Unless debugging, one pass is enough to validate. */
		if (!LKCD_DEBUG(2))
			break;
	}

	/*
	 * Allocate space for LKCD_CACHED_PAGES data pages plus one to
	 * contain a copy of the compressed data of the current page.
	 */
	if ((lkcd->page_cache_buf = (char *)malloc
	    (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL)
		return FALSE;

	/*
	 * Clear the page data areas.
	 */
	lkcd_free_memory();
	for (i = 0; i < LKCD_CACHED_PAGES; i++) {
		lkcd->page_cache_hdr[i].pg_bufptr =
			&lkcd->page_cache_buf[i * dh->dh_page_size];
	}

	if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size))
	    == NULL)
		return FALSE;

	if ((lkcd->page_hash = (struct page_hash_entry *)calloc
	    (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL)
		return FALSE;

	/* Trust the scan count when it exceeds what the header claims. */
	lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ?
		pgcnt : dh->dh_num_pages;
	lkcd->panic_task = (ulong)dh->dh_current_task;
	lkcd->panic_string = (char *)&dh->dh_panic_string[0];

	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		mclx_cache_page_headers_v7();

	if (!fp)
		lkcd->flags |= LKCD_REMOTE;
	lkcd->flags |= LKCD_VALID;

	return TRUE;
}

/*
 * Return the current page's dp_size.
 */
uint32_t
get_dp_size_v7(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_size);
}

/*
 * Return the current page's dp_flags.
*/
uint32_t
get_dp_flags_v7(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_flags);
}

/*
 * Return the current page's dp_address.
 */
uint64_t
get_dp_address_v7(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;

	return(dp->dp_address);
}

/*
 * help -S output, or as specified by arg.
 *
 *   arg: LKCD_DUMP_HEADER_ONLY to show just the dump_header,
 *        LKCD_DUMP_PAGE_ONLY to show just the current dump_page,
 *        anything else shows both.
 */
void
dump_lkcd_environment_v7(ulong arg)
{
	int others;
	dump_header_t *dh;
	dump_page_t *dp;

	dh = (dump_header_t *)lkcd->dump_header;
	dp = (dump_page_t *)lkcd->dump_page;

	if (arg == LKCD_DUMP_HEADER_ONLY)
		goto dump_header_only;
	if (arg == LKCD_DUMP_PAGE_ONLY)
		goto dump_page_only;

dump_header_only:
	lkcd_print(" dump_header:\n");
	lkcd_print(" dh_magic_number: ");
	lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number);
	if (dh->dh_magic_number == DUMP_MAGIC_NUMBER)
		lkcd_print("(DUMP_MAGIC_NUMBER)\n");
	else if (dh->dh_magic_number == DUMP_MAGIC_LIVE)
		lkcd_print("(DUMP_MAGIC_LIVE)\n");
	else
		lkcd_print("(?)\n");
	others = 0;
	lkcd_print(" dh_version: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version);
	/* Low bits carry the format version; MCLX bits are OR'd on top. */
	switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK)
	{
	case LKCD_DUMP_V1:
		lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V2:
		lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V3:
		lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V5:
		lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V7:
		lkcd_print("%sLKCD_DUMP_V7", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V8:
		lkcd_print("%sLKCD_DUMP_V8", others++ ? "|" : "");
		break;
	}
	if (dh->dh_version & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_header_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size);
	lkcd_print(" dh_dump_level: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level);
	others = 0;
	if (dh->dh_dump_level & DUMP_LEVEL_HEADER)
		lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_KERN)
		lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_USED)
		lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_ALL)
		lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_page_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size);
	lkcd_print(" dh_memory_size: ");
	lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size);
	lkcd_print(" dh_memory_start: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start);
	lkcd_print(" dh_memory_end: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end);
	lkcd_print(" dh_num_pages: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages);
	/* Only append a newline if the panic string doesn't contain one. */
	lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string,
		dh && dh->dh_panic_string &&
		strstr(dh->dh_panic_string, "\n") ? "" : "\n");
	lkcd_print(" dh_time: %s\n",
		strip_linefeeds(ctime(&(dh->dh_time.tv_sec))));
	lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname);
	lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename);
	lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release);
	lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version);
	lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine);
	lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname);
	lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task);
	lkcd_print(" dh_dump_compress: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress);
	others = 0;
	/* NONE is a value (equality test); RLE/GZIP are bit tests. */
	if (dh->dh_dump_compress == DUMP_COMPRESS_NONE)
		lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_RLE)
		lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP)
		lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_flags: ");
	others = 0;
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags);
	if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT)
		lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_device: ");
	lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device);

	if (arg == LKCD_DUMP_HEADER_ONLY)
		return;

dump_page_only:
	lkcd_print(" dump_page:\n");
	lkcd_print(" dp_address: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address);
	lkcd_print(" dp_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size);
	lkcd_print(" dp_flags: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags);
	others = 0;
	/*
	 * NOTE(review): this branch has no %s and relies on others being 0
	 * (DUMP_DH_COMPRESSED is tested first); the sibling branches use
	 * the "%s...", others++ ? "|" : "" idiom.
	 */
	if (dp->dp_flags & DUMP_DH_COMPRESSED)
		lkcd_print("DUMP_DH_COMPRESSED", others++);
	if (dp->dp_flags & DUMP_DH_RAW)
		lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (dp->dp_flags & DUMP_DH_END)
		lkcd_print("%sDUMP_DH_END", others++ ? "|" : "");
	if (dp->dp_flags & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	lkcd_print(")\n");
}

/*
 * Write a one-line summary of a v7 dump_page header to the console.
 *
 *   s:   caller-supplied prefix string, printed first.
 *   dpp: pointer to the dump_page_t to decode.
 */
void
dump_dump_page_v7(char *s, void *dpp)
{
	dump_page_t *dp;
	uint32_t flags;
	int others;

	console(s);
	dp = (dump_page_t *)dpp;
	others = 0;

	console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ",
		dp->dp_address);
	console("dp_size: %ld ", dp->dp_size);
	console("dp_flags: %lx (", flags = dp->dp_flags);

	if (flags & DUMP_DH_COMPRESSED)
		console("DUMP_DH_COMPRESSED", others++);
	if (flags & DUMP_DH_RAW)
		console("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (flags & DUMP_DH_END)
		console("%sDUMP_DH_END", others++ ? "|" : "");
	if (flags & LKCD_DUMP_MCLX_V0)
		console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	console(")\n");
}

/*
 * Read the MCLX-enhanced page header cache.  Verify the first one, which
 * is a pointer to the page header for address 1MB, and take the rest at
 * blind faith.  Note that the page headers do not include the 64K dump
 * header offset, which must be added to the values found.
*/ static void mclx_cache_page_headers_v7(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. */ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-7.1.4/ppc64.c0000775000000000000000000026145712634305150012444 0ustar rootroot/* ppc64.c -- core analysis suite * * Copyright (C) 2004-2015 David Anderson * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved. * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef PPC64 #include "defs.h" static int ppc64_kvtop(struct task_context *, ulong, physaddr_t *, int); static int ppc64_uvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ppc64_vmalloc_start(void); static int ppc64_vmemmap_to_phys(ulong, physaddr_t *, int); static int ppc64_is_task_addr(ulong); static int ppc64_verify_symbol(const char *, ulong, char); static ulong ppc64_get_task_pgd(ulong); static int ppc64_translate_pte(ulong, void *, ulonglong); static ulong ppc64_processor_speed(void); static int ppc64_eframe_search(struct bt_info *); static void ppc64_back_trace_cmd(struct bt_info *); static void ppc64_back_trace(struct gnu_request *, struct bt_info *); static void get_ppc64_frame(struct bt_info *, ulong *, ulong *); static void ppc64_print_stack_entry(int,struct gnu_request *, ulong, ulong, struct bt_info *); static void ppc64_dump_irq(int); static ulong ppc64_get_sp(ulong); static void ppc64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ppc64_dis_filter(ulong, char *, unsigned int); static void ppc64_cmd_mach(void); static int ppc64_get_smp_cpus(void); static void ppc64_display_machine_stats(void); static void ppc64_dump_line_number(ulong); static struct line_number_hook ppc64_line_number_hooks[]; static ulong ppc64_get_stackbase(ulong); static ulong ppc64_get_stacktop(ulong); void ppc64_compiler_warning_stub(void); static ulong ppc64_in_irqstack(ulong); static char * ppc64_check_eframe(struct ppc64_pt_regs *); static void ppc64_print_eframe(char *, struct ppc64_pt_regs *, struct bt_info *); static void parse_cmdline_args(void); static int ppc64_paca_init(int); static void ppc64_init_cpu_info(void); static int ppc64_get_cpu_map(void); static void ppc64_clear_machdep_cache(void); 
static void ppc64_vmemmap_init(void); static int ppc64_get_kvaddr_ranges(struct vaddr_range *); static uint get_ptetype(ulong pte); static int is_hugepage(ulong pte); static int is_hugepd(ulong pte); static ulong hugepage_dir(ulong pte); static inline uint get_ptetype(ulong pte) { uint pte_type = 0; /* 0: regular entry; 1: huge pte; 2: huge pd */ if (is_hugepage(pte)) pte_type = 1; else if (is_hugepd(pte)) pte_type = 2; return pte_type; } static int is_hugepage(ulong pte) { /* * leaf pte for huge page, bottom two bits != 00 */ return ((pte & HUGE_PTE_MASK) != 0x0); } static inline int is_hugepd(ulong pte) { if (THIS_KERNEL_VERSION >= LINUX(3,10,0)) { /* * hugepd pointer, bottom two bits == 00 and next 4 bits * indicate size of table */ return (((pte & HUGE_PTE_MASK) == 0x0) && ((pte & HUGEPD_SHIFT_MASK) != 0)); } else return ((pte & PD_HUGE) == 0x0); } static inline ulong hugepage_dir(ulong pte) { if (THIS_KERNEL_VERSION >= LINUX(3,10,0)) return (ulong)(pte & ~HUGEPD_SHIFT_MASK); else return (ulong)((pte & ~HUGEPD_SHIFT_MASK) | PD_HUGE); } static int book3e_is_kvaddr(ulong addr) { return (addr >= BOOK3E_VMBASE); } static int book3e_is_vmaddr(ulong addr) { return (addr >= BOOK3E_VMBASE) && (addr < machdep->identity_map_base); } static int ppc64_is_vmaddr(ulong addr) { return (vt->vmalloc_start && addr >= vt->vmalloc_start); } struct machine_specific ppc64_machine_specific = { .hwintrstack = { 0 }, .hwstackbuf = 0, .hwstacksize = 0, .pte_shift = PTE_SHIFT, ._page_present = 0x1UL, ._page_user = 0x2UL, ._page_rw = 0x4UL, ._page_guarded = 0x8UL, ._page_coherent = 0x10UL, ._page_no_cache = 0x20UL, ._page_writethru = 0x40UL, ._page_dirty = 0x80UL, ._page_accessed = 0x100UL, .is_kvaddr = generic_is_kvaddr, .is_vmaddr = ppc64_is_vmaddr, }; struct machine_specific book3e_machine_specific = { .hwintrstack = { 0 }, .hwstackbuf = 0, .hwstacksize = 0, .pte_shift = PTE_SHIFT_L4_BOOK3E_64K, ._page_present = 0x1UL, ._page_user = 0xCUL, ._page_rw = 0x30UL, ._page_guarded = 
0x100000UL, ._page_coherent = 0x200000UL, ._page_no_cache = 0x400000UL, ._page_writethru = 0x800000UL, ._page_dirty = 0x1000UL, ._page_accessed = 0x40000UL, .is_kvaddr = book3e_is_kvaddr, .is_vmaddr = book3e_is_vmaddr, }; /* * Do all necessary machine-specific setup here. This is called several * times during initialization. */ void ppc64_init(int when) { #if defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the PPC64 architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->machspec = &ppc64_machine_specific; machdep->verify_symbol = ppc64_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->stacksize = PPC64_STACK_SIZE; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->machspec->last_level4_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->flags |= MACHDEP_BT_TEXT; if (machdep->cmdline_args[0]) parse_cmdline_args(); machdep->clear_machdep_cache = ppc64_clear_machdep_cache; break; case PRE_GDB: /* * Recently there were changes made to kexec tools * to support 64K page size. With those changes * vmcore file obtained from a kernel which supports * 64K page size cannot be analyzed using crash on a * machine running with kernel supporting 4K page size * * The following modifications are required in crash * tool to be in sync with kexec tools. * * Look if the following symbol exists. If yes then * the dump was taken with a kernel supporting 64k * page size. So change the page size accordingly. * * Also moved the following code block from * PRE_SYMTAB case here. 
*/ if (symbol_exists("interrupt_base_book3e")) { machdep->machspec = &book3e_machine_specific; machdep->flags |= BOOK3E; machdep->kvbase = BOOK3E_VMBASE; } else machdep->kvbase = symbol_value("_stext"); if (symbol_exists("__hash_page_64K")) machdep->pagesize = PPC64_64K_PAGE_SIZE; else machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); if ((machdep->machspec->level4 = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc level4 space."); machdep->identity_map_base = symbol_value("_stext"); machdep->is_kvaddr = machdep->machspec->is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ppc64_eframe_search; machdep->back_trace = ppc64_back_trace_cmd; machdep->processor_speed = ppc64_processor_speed; machdep->uvtop = ppc64_uvtop; machdep->kvtop = ppc64_kvtop; machdep->get_task_pgd = ppc64_get_task_pgd; machdep->get_stack_frame = ppc64_get_stack_frame; machdep->get_stackbase = ppc64_get_stackbase; machdep->get_stacktop = ppc64_get_stacktop; machdep->translate_pte = ppc64_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = ppc64_is_task_addr; machdep->dis_filter = ppc64_dis_filter; machdep->cmd_mach = ppc64_cmd_mach; machdep->get_smp_cpus = ppc64_get_smp_cpus; machdep->line_number_hooks = ppc64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->get_kvaddr_ranges = ppc64_get_kvaddr_ranges; machdep->init_kernel_pgd = NULL; if (symbol_exists("vmemmap_populate")) { machdep->flags |= VMEMMAP; machdep->machspec->vmemmap_base = VMEMMAP_REGION_ID << 
REGION_SHIFT; } machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; break; case POST_GDB: if (!(machdep->flags & (VM_ORIG|VM_4_LEVEL))) { if (THIS_KERNEL_VERSION >= LINUX(2,6,14)) { machdep->flags |= VM_4_LEVEL; } else { machdep->flags |= VM_ORIG; } } if (machdep->flags & VM_ORIG) { /* pre-2.6.14 layout */ free(machdep->machspec->level4); machdep->machspec->level4 = NULL; machdep->ptrs_per_pgd = PTRS_PER_PGD; } else { /* 2.6.14 layout */ struct machine_specific *m = machdep->machspec; if (machdep->pagesize == 65536) { /* 64K pagesize */ if (THIS_KERNEL_VERSION >= LINUX(3,10,0)) { m->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10; m->l2_index_size = PMD_INDEX_SIZE_L4_64K_3_10; m->l3_index_size = PUD_INDEX_SIZE_L4_64K; m->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10; } else { m->l1_index_size = PTE_INDEX_SIZE_L4_64K; m->l2_index_size = PMD_INDEX_SIZE_L4_64K; m->l3_index_size = PUD_INDEX_SIZE_L4_64K; m->l4_index_size = PGD_INDEX_SIZE_L4_64K; } if (!(machdep->flags & BOOK3E)) m->pte_shift = symbol_exists("demote_segment_4k") ? PTE_SHIFT_L4_64K_V2 : PTE_SHIFT_L4_64K_V1; m->l2_masked_bits = PMD_MASKED_BITS_64K; } else { /* 4K pagesize */ m->l1_index_size = PTE_INDEX_SIZE_L4_4K; m->l2_index_size = PMD_INDEX_SIZE_L4_4K; m->l3_index_size = PUD_INDEX_SIZE_L4_4K; m->l4_index_size = PGD_INDEX_SIZE_L4_4K; m->pte_shift = (machdep->flags & BOOK3E) ? 
PTE_SHIFT_L4_BOOK3E_4K : PTE_SHIFT_L4_4K; m->l2_masked_bits = PMD_MASKED_BITS_4K; } /* Compute ptrs per each level */ m->l1_shift = machdep->pageshift; m->ptrs_per_l1 = (1 << m->l1_index_size); m->ptrs_per_l2 = (1 << m->l2_index_size); m->ptrs_per_l3 = (1 << m->l3_index_size); machdep->ptrs_per_pgd = m->ptrs_per_l3; /* Compute shifts */ m->l2_shift = m->l1_shift + m->l1_index_size; m->l3_shift = m->l2_shift + m->l2_index_size; m->l4_shift = m->l3_shift + m->l3_index_size; } if (machdep->flags & VMEMMAP) ppc64_vmemmap_init(); machdep->section_size_bits = _SECTION_SIZE_BITS; if (THIS_KERNEL_VERSION >= LINUX(3,7,0)) machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_3_7; else machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ppc64_init_cpu_info(); machdep->vmalloc_start = ppc64_vmalloc_start; MEMBER_OFFSET_INIT(thread_struct_pg_tables, "thread_struct", "pg_tables"); STRUCT_SIZE_INIT(irqdesc, "irqdesc"); STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); if (INVALID_SIZE(irqdesc) && INVALID_SIZE(irq_desc_t)) STRUCT_SIZE_INIT(irq_desc_t, "irq_desc"); /* as of 2.3.x PPC uses the generic irq handlers */ if (VALID_SIZE(irq_desc_t)) machdep->dump_irq = generic_dump_irq; else { machdep->dump_irq = ppc64_dump_irq; MEMBER_OFFSET_INIT(irqdesc_action, "irqdesc", "action"); MEMBER_OFFSET_INIT(irqdesc_ctl, "irqdesc", "ctl"); MEMBER_OFFSET_INIT(irqdesc_level, "irqdesc", "level"); } MEMBER_OFFSET_INIT(device_node_type, "device_node", "type"); MEMBER_OFFSET_INIT(device_node_allnext, "device_node", "allnext"); MEMBER_OFFSET_INIT(device_node_properties, "device_node", "properties"); MEMBER_OFFSET_INIT(property_name, "property", "name"); MEMBER_OFFSET_INIT(property_value, "property", "value"); MEMBER_OFFSET_INIT(property_next, "property", "next"); MEMBER_OFFSET_INIT(machdep_calls_setup_residual, "machdep_calls", "setup_residual"); MEMBER_OFFSET_INIT(RESIDUAL_VitalProductData, "RESIDUAL", "VitalProductData"); MEMBER_OFFSET_INIT(VPD_ProcessorHz, "VPD", "ProcessorHz"); 
MEMBER_OFFSET_INIT(bd_info_bi_intfreq, "bd_info", "bi_intfreq"); if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else machdep->nr_irqs = 0; if (symbol_exists("paca") && MEMBER_EXISTS("paca_struct", "xHrdIntStack")) { ulong paca_sym, offset; uint cpu, paca_size = STRUCT_SIZE("paca_struct"); /* * Get the HW Interrupt stack base and top values. * Note that, this stack will be used to store frames * when the CPU received IPI (only for 2.4 kernel). * Hence it is needed to retrieve IPI symbols * (Ex: smp_message_recv, xics_ipi_action, and etc) * and to get the top SP in the process's stack. */ offset = MEMBER_OFFSET("paca_struct", "xHrdIntStack"); paca_sym = symbol_value("paca"); for (cpu = 0; cpu < kt->cpus; cpu++) { readmem(paca_sym + (paca_size * cpu) + offset, KVADDR, &machdep->machspec->hwintrstack[cpu], sizeof(ulong), "PPC64 HW_intr_stack", FAULT_ON_ERROR); } machdep->machspec->hwstacksize = 8 * machdep->pagesize; if ((machdep->machspec->hwstackbuf = (char *) malloc(machdep->machspec->hwstacksize)) == NULL) error(FATAL, "cannot malloc hwirqstack space."); } else /* * 'xHrdIntStack' member in "paca_struct" is not * available for 2.6 kernel. */ BZERO(&machdep->machspec->hwintrstack, NR_CPUS*sizeof(ulong)); if (!machdep->hz) { machdep->hz = HZ; if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) machdep->hz = 1000; } /* * IRQ stacks are introduced in 2.6 and also configurable. 
*/ if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("hardirq_ctx")) ASSIGN_SIZE(irq_ctx) = STACKSIZE(); break; case POST_INIT: break; case LOG_ONLY: machdep->identity_map_base = kt->vmcoreinfo._stext_SYMBOL; break; } } #ifndef KSYMS_START #define KSYMS_START 1 #endif static ulong ppc64_task_to_stackbase(ulong task) { if (tt->flags & THREAD_INFO) return task_to_thread_info(task); else return task; } static ulong ppc64_get_stackbase(ulong task) { return ppc64_task_to_stackbase(task); } static ulong ppc64_get_stacktop(ulong task) { return ppc64_task_to_stackbase(task) + STACKSIZE(); } void ppc64_dump_machdep_table(ulong arg) { int i, c, others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & MACHDEP_BT_TEXT) fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); if (machdep->flags & VM_ORIG) fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); if (machdep->flags & VM_4_LEVEL) fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); if (machdep->flags & VMEMMAP) fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); if (machdep->flags & VMEMMAP_AWARE) fprintf(fp, "%sVMEMMAP_AWARE", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ppc64_eframe_search()\n"); fprintf(fp, " back_trace: ppc64_back_trace_cmd()\n"); fprintf(fp, " processor_speed: ppc64_processor_speed()\n"); fprintf(fp, " uvtop: ppc64_uvtop()\n"); fprintf(fp, " kvtop: ppc64_kvtop()\n"); fprintf(fp, " get_task_pgd: ppc64_get_task_pgd()\n"); fprintf(fp, " dump_irq: ppc64_dump_irq()\n"); fprintf(fp, " get_stack_frame: ppc64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: ppc64_get_stackbase()\n"); fprintf(fp, " get_stacktop: ppc64_get_stacktop()\n"); fprintf(fp, " translate_pte: ppc64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ppc64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ppc64_is_task_addr()\n"); fprintf(fp, " verify_symbol: ppc64_verify_symbol()\n"); fprintf(fp, " dis_filter: ppc64_dis_filter()\n"); fprintf(fp, " cmd_mach: ppc64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ppc64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: %s\n", machdep->is_kvaddr == book3e_is_kvaddr ? 
"book3e_is_kvaddr()" : "generic_is_kvaddr()"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " get_kvaddr_ranges: ppc64_get_kvaddr_ranges()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " xendump_p2m_create: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: NULL\n"); fprintf(fp, " line_number_hooks: ppc64_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, "clear_machdep_cache: ppc64_clear_machdep_cache()\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); fprintf(fp, " is_kvaddr: %s\n", machdep->machspec->is_kvaddr == book3e_is_kvaddr ? "book3e_is_kvaddr()" : "generic_is_kvaddr()"); fprintf(fp, " is_vmaddr: %s\n", machdep->machspec->is_vmaddr == book3e_is_vmaddr ? "book3e_is_vmaddr()" : "ppc64_is_vmaddr()"); fprintf(fp, " hwintrstack[%d]: ", NR_CPUS); for (c = 0; c < NR_CPUS; c++) { for (others = 0, i = c; i < NR_CPUS; i++) { if (machdep->machspec->hwintrstack[i]) others++; } if (!others) { fprintf(fp, "%s%s", c && ((c % 4) == 0) ? "\n " : "", c ? "(remainder unused)" : "(unused)"); break; } fprintf(fp, "%s%016lx ", ((c % 4) == 0) ? 
"\n " : "", machdep->machspec->hwintrstack[c]); } fprintf(fp, "\n"); fprintf(fp, " hwstackbuf: %lx\n", (ulong)machdep->machspec->hwstackbuf); fprintf(fp, " hwstacksize: %d\n", machdep->machspec->hwstacksize); fprintf(fp, " level4: %lx\n", (ulong)machdep->machspec->level4); fprintf(fp, " last_level4_read: %lx\n", (ulong)machdep->machspec->last_level4_read); fprintf(fp, " l4_index_size: %d\n", machdep->machspec->l4_index_size); fprintf(fp, " l3_index_size: %d\n", machdep->machspec->l3_index_size); fprintf(fp, " l2_index_size: %d\n", machdep->machspec->l2_index_size); fprintf(fp, " l1_index_size: %d\n", machdep->machspec->l1_index_size); fprintf(fp, " ptrs_per_l3: %d\n", machdep->machspec->ptrs_per_l3); fprintf(fp, " ptrs_per_l2: %d\n", machdep->machspec->ptrs_per_l2); fprintf(fp, " ptrs_per_l1: %d\n", machdep->machspec->ptrs_per_l1); fprintf(fp, " l4_shift: %d\n", machdep->machspec->l4_shift); fprintf(fp, " l3_shift: %d\n", machdep->machspec->l3_shift); fprintf(fp, " l2_shift: %d\n", machdep->machspec->l2_shift); fprintf(fp, " l1_shift: %d\n", machdep->machspec->l1_shift); fprintf(fp, " pte_shift: %d\n", machdep->machspec->pte_shift); fprintf(fp, " l2_masked_bits: %x\n", machdep->machspec->l2_masked_bits); fprintf(fp, " vmemmap_base: "); if (machdep->machspec->vmemmap_base) fprintf(fp, "%lx\n", machdep->machspec->vmemmap_base); else fprintf(fp, "(unused)\n"); if (machdep->machspec->vmemmap_cnt) { fprintf(fp, " vmemmap_cnt: %d\n", machdep->machspec->vmemmap_cnt); fprintf(fp, " vmemmap_psize: %d\n", machdep->machspec->vmemmap_psize); for (i = 0; i < machdep->machspec->vmemmap_cnt; i++) { fprintf(fp, " vmemmap_list[%d]: virt: %lx phys: %lx\n", i, machdep->machspec->vmemmap_list[i].virt, machdep->machspec->vmemmap_list[i].phys); } } else { fprintf(fp, " vmemmap_cnt: (unused)\n"); fprintf(fp, " vmemmap_page_size: (unused)\n"); fprintf(fp, " vmemmap_list[]: (unused)\n"); } } /* * Virtual to physical memory translation. 
This function will be called * by both ppc64_kvtop and ppc64_uvtop. */ static int ppc64_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose) { ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte, pmd_pte; ulong pte; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); if (THIS_KERNEL_VERSION < LINUX(2,6,0)) page_dir = (ulong *)((uint *)pgd + PGD_OFFSET_24(vaddr)); else page_dir = (ulong *)((uint *)pgd + PGD_OFFSET(vaddr)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = UINT(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!pgd_pte) return FALSE; pgd_pte <<= PAGESHIFT(); page_middle = (ulong *)((uint *)pgd_pte + PMD_OFFSET(vaddr)); FILL_PMD(PTOV(PAGEBASE(pgd_pte)), KVADDR, PAGESIZE()); pmd_pte = UINT(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; if (THIS_KERNEL_VERSION < LINUX(2,6,0)) pmd_pte <<= PAGESHIFT(); else pmd_pte = ((pmd_pte << PAGESHIFT()) >> PMD_TO_PTEPAGE_SHIFT); page_table = (ulong *)pmd_pte + (BTOP(vaddr) & (PTRS_PER_PTE - 1)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, (ulong)page_table); FILL_PTBL(PTOV(PAGEBASE(pmd_pte)), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & _PAGE_PRESENT)) { if (pte && verbose) { fprintf(fp, "\n"); ppc64_translate_pte(pte, 0, PTE_SHIFT); } return FALSE; } if (!pte) return FALSE; *paddr = PAGEBASE(PTOB(pte >> PTE_SHIFT)) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ppc64_translate_pte(pte, 0, PTE_SHIFT); } return TRUE; } /* * Virtual to physical memory translation. This function will be called * by both ppc64_kvtop and ppc64_uvtop. 
*/ static int ppc64_vtop_level4(ulong vaddr, ulong *level4, physaddr_t *paddr, int verbose) { ulong *level4_dir; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong level4_pte, pgd_pte, pmd_pte; ulong pte; uint hugepage_type = 0; /* 0: regular entry; 1: huge pte; 2: huge pd */ if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)level4); level4_dir = (ulong *)((ulong *)level4 + L4_OFFSET(vaddr)); FILL_L4(PAGEBASE(level4), KVADDR, PAGESIZE()); level4_pte = ULONG(machdep->machspec->level4 + PAGEOFFSET(level4_dir)); if (verbose) fprintf(fp, " L4: %lx => %lx\n", (ulong)level4_dir, level4_pte); if (!level4_pte) return FALSE; hugepage_type = get_ptetype(level4_pte); if (hugepage_type) { pte = level4_pte; goto out; } /* Sometimes we don't have level3 pagetable entries */ if (machdep->machspec->l3_index_size != 0) { page_dir = (ulong *)((ulong *)level4_pte + PGD_OFFSET_L4(vaddr)); FILL_PGD(PAGEBASE(level4_pte), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!pgd_pte) return FALSE; hugepage_type = get_ptetype(pgd_pte); if (hugepage_type) { pte = pgd_pte; goto out; } } else { pgd_pte = level4_pte; } page_middle = (ulong *)((ulong *)pgd_pte + PMD_OFFSET_L4(vaddr)); FILL_PMD(PAGEBASE(pgd_pte), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; hugepage_type = get_ptetype(pmd_pte); if (hugepage_type) { pte = pmd_pte; goto out; } page_table = (ulong *)(pmd_pte & ~(machdep->machspec->l2_masked_bits)) + (BTOP(vaddr) & (machdep->machspec->ptrs_per_l1 - 1)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, (ulong)page_table); FILL_PTBL(PAGEBASE(pmd_pte), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & 
_PAGE_PRESENT)) { if (pte && verbose) { fprintf(fp, "\n"); ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); } return FALSE; } if (!pte) return FALSE; out: if (hugepage_type) { if (hugepage_type == 2) { /* TODO: Calculate the offset within the huge page * directory for this huge page to get corresponding * physical address. In the current form, it may * return the physical address of the first huge page * in this directory for all the huge pages * in this huge page directory. */ readmem(hugepage_dir(pte), KVADDR, &pte, sizeof(pte), "hugepd_entry", RETURN_ON_ERROR); } /* TODO: get page offset for huge pages based on page size */ *paddr = PAGEBASE(PTOB(pte >> machdep->machspec->pte_shift)); } else { *paddr = PAGEBASE(PTOB(pte >> machdep->machspec->pte_shift)) + PAGEOFFSET(vaddr); } if (verbose) { if (hugepage_type) fprintf(fp, " HUGE PAGE: %lx\n\n", PAGEBASE(*paddr)); else fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ppc64_translate_pte(pte, 0, machdep->machspec->pte_shift); } return TRUE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. 
*/ static int ppc64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_pg_tables)) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no pg_tables or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (machdep->flags & VM_4_LEVEL) return ppc64_vtop_level4(vaddr, pgd, paddr, verbose); else return ppc64_vtop(vaddr, pgd, paddr, verbose); } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
*/ static int ppc64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if ((machdep->flags & VMEMMAP) && (kvaddr >= machdep->machspec->vmemmap_base)) return ppc64_vmemmap_to_phys(kvaddr, paddr, verbose); if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } if (machdep->flags & VM_4_LEVEL) return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); else return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); } /* * Verify that the kernel has made the vmemmap list available, * and if so, stash the relevant data required to make vtop * translations. */ static void ppc64_vmemmap_init(void) { int i, psize, shift, cnt; struct list_data list_data, *ld; long backing_size, virt_addr_offset, phys_offset, list_offset; ulong *vmemmap_list; char *vmemmap_buf; struct machine_specific *ms; if (!(kernel_symbol_exists("vmemmap_list")) || !(kernel_symbol_exists("mmu_psize_defs")) || !(kernel_symbol_exists("mmu_vmemmap_psize")) || !STRUCT_EXISTS("vmemmap_backing") || !STRUCT_EXISTS("mmu_psize_def") || !MEMBER_EXISTS("mmu_psize_def", "shift") || !MEMBER_EXISTS("vmemmap_backing", "phys") || !MEMBER_EXISTS("vmemmap_backing", "virt_addr") || !MEMBER_EXISTS("vmemmap_backing", "list")) return; ms = machdep->machspec; backing_size = STRUCT_SIZE("vmemmap_backing"); virt_addr_offset = MEMBER_OFFSET("vmemmap_backing", "virt_addr"); phys_offset = MEMBER_OFFSET("vmemmap_backing", "phys"); list_offset = MEMBER_OFFSET("vmemmap_backing", "list"); if (!readmem(symbol_value("mmu_vmemmap_psize"), KVADDR, &psize, sizeof(int), "mmu_vmemmap_psize", RETURN_ON_ERROR)) return; if (!readmem(symbol_value("mmu_psize_defs") + (STRUCT_SIZE("mmu_psize_def") * psize) + MEMBER_OFFSET("mmu_psize_def", "shift"), KVADDR, &shift, sizeof(int), "mmu_psize_def shift", RETURN_ON_ERROR)) return; ms->vmemmap_psize = 1 << shift; ld = 
&list_data; BZERO(ld, sizeof(struct list_data)); if (!readmem(symbol_value("vmemmap_list"), KVADDR, &ld->start, sizeof(void *), "vmemmap_list", RETURN_ON_ERROR)) return; ld->end = symbol_value("vmemmap_list"); ld->list_head_offset = list_offset; hq_open(); cnt = do_list(ld); vmemmap_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(vmemmap_list, cnt); hq_close(); if ((ms->vmemmap_list = (struct ppc64_vmemmap *)malloc(cnt * sizeof(struct ppc64_vmemmap))) == NULL) error(FATAL, "cannot malloc vmemmap list space"); vmemmap_buf = GETBUF(backing_size); for (i = 0; i < cnt; i++) { if (!readmem(vmemmap_list[i], KVADDR, vmemmap_buf, backing_size, "vmemmap_backing", RETURN_ON_ERROR)) { free(ms->vmemmap_list); goto out; } ms->vmemmap_list[i].phys = ULONG(vmemmap_buf + phys_offset); ms->vmemmap_list[i].virt = ULONG(vmemmap_buf + virt_addr_offset); if (ms->vmemmap_list[i].virt < ms->vmemmap_base) ms->vmemmap_base = ms->vmemmap_list[i].virt; } ms->vmemmap_cnt = cnt; machdep->flags |= VMEMMAP_AWARE; if (CRASHDEBUG(1)) fprintf(fp, "ppc64_vmemmap_init: vmemmap base: %lx\n", ms->vmemmap_base); out: FREEBUF(vmemmap_buf); FREEBUF(vmemmap_list); } /* * If the vmemmap address translation information is stored in the kernel, * make the translation. */ static int ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose) { int i; ulong offset; struct machine_specific *ms; if (!(machdep->flags & VMEMMAP_AWARE)) { /* * During runtime, just fail the command. */ if (vt->flags & VM_INIT) error(FATAL, "cannot translate vmemmap address: %lx\n", kvaddr); /* * During vm_init() initialization, print a warning message. 
*/ error(WARNING, "cannot translate vmemmap kernel virtual addresses:\n" " commands requiring page structure contents" " will fail\n\n"); return FALSE; } ms = machdep->machspec; for (i = 0; i < ms->vmemmap_cnt; i++) { if ((kvaddr >= ms->vmemmap_list[i].virt) && (kvaddr < (ms->vmemmap_list[i].virt + ms->vmemmap_psize))) { offset = kvaddr - ms->vmemmap_list[i].virt; *paddr = ms->vmemmap_list[i].phys + offset; return TRUE; } } return FALSE; } /* * Determine where vmalloc'd memory starts. */ static ulong ppc64_vmalloc_start(void) { return (first_vmalloc_address()); } /* * */ static int ppc64_is_task_addr(ulong task) { int i; if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)) return TRUE; for (i = 0; i < kt->cpus; i++) if (task == tt->idle_threads[i]) return TRUE; return FALSE; } /* * */ static ulong ppc64_processor_speed(void) { ulong res, value, ppc_md, md_setup_res; ulong prep_setup_res; ulong node, type, name, properties; char str_buf[32]; uint len; ulong mhz = 0; if (machdep->mhz) return(machdep->mhz); if (symbol_exists("ppc_proc_freq")) { get_symbol_data("ppc_proc_freq", sizeof(ulong), &mhz); mhz /= 1000000; return (machdep->mhz = mhz); } if(symbol_exists("allnodes")) { get_symbol_data("allnodes", sizeof(void *), &node); while(node) { readmem(node+OFFSET(device_node_type), KVADDR, &type, sizeof(ulong), "node type", FAULT_ON_ERROR); if(type != 0) { len = read_string(type, str_buf, sizeof(str_buf)); if(len && (strcasecmp(str_buf, "cpu") == 0)) break; } readmem(node+OFFSET(device_node_allnext), KVADDR, &node, sizeof(ulong), "node allnext", FAULT_ON_ERROR); } /* now, if we found a CPU node, get the speed property */ if(node) { readmem(node+OFFSET(device_node_properties), KVADDR, &properties, sizeof(ulong), "node properties", FAULT_ON_ERROR); while(properties) { readmem(properties+OFFSET(property_name), KVADDR, &name, sizeof(ulong), "property name", FAULT_ON_ERROR); len = read_string(name, str_buf, 
sizeof(str_buf)); if (len && (strcasecmp(str_buf, "clock-frequency") == 0)) { /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, sizeof(int), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } else if(len && (strcasecmp(str_buf, "ibm,extended-clock-frequency") == 0)){ /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, sizeof(ulong), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } /* keep looking */ readmem(properties+ OFFSET(property_next), KVADDR, &properties, sizeof(ulong), "property next", FAULT_ON_ERROR); } if(!properties) { /* didn't find the cpu speed for some reason */ return (machdep->mhz = 0); } } } /* for machines w/o OF */ /* untested, but in theory this should work on prep machines */ if (symbol_exists("res") && !mhz) { get_symbol_data("res", sizeof(void *), &res); if (symbol_exists("prep_setup_residual")) { get_symbol_data("prep_setup_residual", sizeof(void *), &prep_setup_res); get_symbol_data("ppc_md", sizeof(void *), &ppc_md); readmem(ppc_md + OFFSET(machdep_calls_setup_residual), KVADDR, &md_setup_res, sizeof(ulong), "ppc_md setup_residual", FAULT_ON_ERROR); if(prep_setup_res == md_setup_res) { /* PREP machine */ readmem(res+ OFFSET(RESIDUAL_VitalProductData)+ OFFSET(VPD_ProcessorHz), KVADDR, &mhz, sizeof(ulong), "res VitalProductData", FAULT_ON_ERROR); mhz = (mhz > 1024) ? mhz >> 20 : mhz; } } if(!mhz) { /* everything else seems to do this the same way... 
*/ readmem(res + OFFSET(bd_info_bi_intfreq), KVADDR, &mhz, sizeof(ulong), "bd_info bi_intfreq", FAULT_ON_ERROR); mhz /= 1000000; } } /* else...well, we don't have OF, or a residual structure, so * just print unknown MHz */ return (machdep->mhz = (ulong)mhz); } /* * Accept or reject a symbol from the kernel namelist. */ static int ppc64_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_start") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STREQ(name, "Letext") && !STRNEQ(name, "__func__.")); } /* * Get the relevant page directory pointer from a task structure. */ static ulong ppc64_get_task_pgd(ulong task) { long offset; ulong pg_tables; offset = VALID_MEMBER(task_struct_thread) ? OFFSET(task_struct_thread) : OFFSET(task_struct_tss); if (INVALID_MEMBER(thread_struct_pg_tables)) error(FATAL, "pg_tables does not exist in this kernel's thread_struct\n"); offset += OFFSET(thread_struct_pg_tables); readmem(task + offset, KVADDR, &pg_tables, sizeof(ulong), "task thread pg_tables", FAULT_ON_ERROR); return(pg_tables); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
*/ static int ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_shift) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; ulong paddr; paddr = PTOB(pte >> pte_shift); page_present = (pte & _PAGE_PRESENT); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte & _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (pte & _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (pte & _PAGE_GUARDED) fprintf(fp, "%sGUARDED", others++ ? "|" : ""); if (pte & _PAGE_COHERENT) fprintf(fp, "%sCOHERENT", others++ ? "|" : ""); if (pte & _PAGE_NO_CACHE) fprintf(fp, "%sNO_CACHE", others++ ? "|" : ""); if (pte & _PAGE_WRITETHRU) fprintf(fp, "%sWRITETHRU", others++ ? 
"|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); } else fprintf(fp, "no mapping"); fprintf(fp, ")\n"); return page_present; } /* * The user specified SP could be in HW interrupt stack for tasks running on * other CPUs. Hence, get the SP which is in process's stack. */ static ulong ppc64_check_sp_in_HWintrstack(ulong sp, struct bt_info *bt) { /* * Since the seperate HW Interrupt stack is involved to store * IPI frames, printing all stack symbols or searching for exception * frames for running tasks on other CPUS is tricky. The simple * solution is - ignore HW intr stack and search in the process stack. * Anyway the user will be interested only frames that are * involved before receiving CALL_FUNCTION_IPI. * So, if the SP is not within the stack, read the top value * from the HW Interrupt stack which is the SP points to top * frame in the process's stack. * * Note: HW Interrupt stack is used only in 2.4 kernel. */ if (is_task_active(bt->task) && (tt->panic_task != bt->task) && machdep->machspec->hwintrstack[bt->tc->processor]) { ulong newsp; readmem(machdep->machspec->hwintrstack[bt->tc->processor], KVADDR, &newsp, sizeof(ulong), "stack pointer", FAULT_ON_ERROR); if (INSTACK(newsp, bt)) sp = newsp; } return sp; } /* * Look for likely exception frames in a stack. 
*/ static int ppc64_eframe_search(struct bt_info *bt_in) { ulong addr; struct bt_info bt_local, *bt; ulong *stack, *first, *last; ulong irqstack; char *mode; ulong eframe_addr; int c, cnt; struct ppc64_pt_regs *regs; bt = bt_in; if (bt->flags & BT_EFRAME_SEARCH2) { if (!(tt->flags & IRQSTACKS)) { error(INFO, "This kernel does not have IRQ stacks\n"); return 0; } BCOPY(bt_in, &bt_local, sizeof(struct bt_info)); bt = &bt_local; bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; for (c = 0; c < NR_CPUS; c++) { if (tt->hardirq_ctx[c]) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; bt->hp->esp = tt->hardirq_ctx[c]; fprintf(fp, "CPU %d HARD IRQ STACK:\n", c); if ((cnt = ppc64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } } for (c = 0; c < NR_CPUS; c++) { if (tt->softirq_ctx[c]) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; bt->hp->esp = tt->softirq_ctx[c]; fprintf(fp, "CPU %d SOFT IRQ STACK:\n", c); if ((cnt = ppc64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } } return 0; } if (bt->hp && bt->hp->esp) { BCOPY(bt_in, &bt_local, sizeof(struct bt_info)); bt = &bt_local; addr = bt->hp->esp; if ((irqstack = ppc64_in_irqstack(addr))) { bt->stackbase = irqstack; bt->stacktop = irqstack + STACKSIZE(); alter_stackbuf(bt); addr = bt->stackbase + roundup(SIZE(thread_info), sizeof(ulong)); } else if (!INSTACK(addr, bt)) { /* * If the user specified SP is in HW interrupt stack * (only for tasks running on other CPUs and in 2.4 * kernel), get the top SP points to process's stack. 
*/ addr = ppc64_check_sp_in_HWintrstack(addr, bt); if (!INSTACK(addr, bt)) error(FATAL, "unrecognized stack address for this task: %lx\n", addr); } } else if (tt->flags & THREAD_INFO) addr = bt->stackbase + roundup(SIZE(thread_info), sizeof(ulong)); else addr = bt->stackbase + roundup(SIZE(task_struct), sizeof(ulong)); if (!INSTACK(addr, bt)) return(0); stack = (ulong *)bt->stackbuf; first = stack + ((addr - bt->stackbase) / sizeof(ulong)); last = stack + (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) / sizeof(ulong)); for ( ; first <= last; first++) { char *efrm_str = NULL; eframe_addr = bt->stackbase + sizeof(ulong) * (first - stack); if (THIS_KERNEL_VERSION < LINUX(2,6,0)) { regs = (struct ppc64_pt_regs *)first; if (!IS_KVADDR(regs->gpr[1]) || !IS_KVADDR(regs->nip) || !is_kernel_text(regs->nip)) if (!IS_UVADDR(regs->gpr[1], bt->tc) || !IS_UVADDR(regs->nip, bt->tc)) continue; } else { /* * In 2.6 or later, 0x7265677368657265 is saved in the * stack (sp + 96) for the exception frame. Also, * pt_regs will be saved at sp + 112. * Hence, once we know the location of exception marker * in the stack, pt_regs is saved at * - 96 + 112. ==> first + 16. */ if (*first == EXCP_FRAME_MARKER) { ulong *sp; /* * SP points to - 96/8; */ sp = (ulong *)(first - 12); if (!IS_KVADDR(*sp)) if (!IS_UVADDR(*sp, bt->tc)) continue; first = (ulong *)((char *)first + 16); regs = (struct ppc64_pt_regs *)first; } else continue; } if ((efrm_str = ppc64_check_eframe(regs)) != NULL) { if ((((regs)->msr) >> MSR_PR_LG) & 0x1) mode = "USER-MODE"; else mode = "KERNEL-MODE"; fprintf(fp, "%s %s EXCEPTION FRAME AT %lx:\n", bt->flags & BT_EFRAME_SEARCH ? 
"\n" : "", mode, eframe_addr); ppc64_print_eframe(efrm_str, regs, bt); } } return 0; } static ulong ppc64_in_irqstack(ulong addr) { int c; if (!(tt->flags & IRQSTACKS)) return 0; for (c = 0; c < NR_CPUS; c++) { if (tt->hardirq_ctx[c]) { if ((addr >= tt->hardirq_ctx[c]) && (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx)))) return(tt->hardirq_ctx[c]); } if (tt->softirq_ctx[c]) { if ((addr >= tt->softirq_ctx[c]) && (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx)))) return(tt->softirq_ctx[c]); } } return 0; } /* * Unroll a kernel stack. */ static void ppc64_back_trace_cmd(struct bt_info *bt) { char buf[BUFSIZE]; struct gnu_request *req; extern void print_stack_text_syms(struct bt_info *, ulong, ulong); bt->flags |= BT_EXCEPTION_FRAME; if (CRASHDEBUG(1) || bt->debug) fprintf(fp, " => PC: %lx (%s) FP: %lx \n", bt->instptr, value_to_symstr(bt->instptr, buf, 0), bt->stkptr); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_STACK_TRACE; req->flags = GNU_RETURN_ON_ERROR; req->buf = GETBUF(BUFSIZE); req->debug = bt->debug; req->task = bt->task; req->pc = bt->instptr; req->sp = bt->stkptr; if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { if (!INSTACK(req->sp, bt)) /* * If the user specified SP is in HW interrupt stack * (only for tasks running on other CPUs and in 2.4 * kernel), get the top SP points to process's stack. */ req->sp = ppc64_check_sp_in_HWintrstack(req->sp, bt); print_stack_text_syms(bt, req->sp, req->pc); } else { if (bt->flags & BT_USE_GDB) { strcpy(req->buf, "backtrace"); gdb_interface(req); } else ppc64_back_trace(req, bt); } FREEBUF(req->buf); FREEBUF(req); } /* * Unroll the kernel stack using a minimal amount of gdb services. * * (Ref: 64-bit PowerPC ELF ABI Spplement; Ian Lance Taylor, Zembu Labs). * A PPC64 stack frame looks like this: * * High Address * .-> Back Chain (etc...) 
* | FP reg save area * | GP reg save area * | Local var space * | Parameter save area (SP+48) * | TOC save area (SP+40) * | link editor doubleword (SP+32) * | compiler doubleword (SP+24) * | LR save (SP+16) * | CR save (SP+8) * `- Back Chain <-- sp (SP+0) * * Note that the LR (ret addr) may not be saved in the current frame if * no functions have been called from the current function. */ /* HACK: put an initial lr in this var for find_trace(). It will be * cleared during the trace. */ static void ppc64_back_trace(struct gnu_request *req, struct bt_info *bt) { int frame = 0; ulong lr = 0; /* hack...need to pass in initial lr reg */ ulong newpc = 0, newsp, marker; int eframe_found; if (!INSTACK(req->sp, bt)) { ulong irqstack; struct machine_specific *ms = machdep->machspec; if ((irqstack = ppc64_in_irqstack(req->sp))) { bt->stackbase = irqstack; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); } else if (ms->hwintrstack[bt->tc->processor]) { bt->stacktop = ms->hwintrstack[bt->tc->processor] + sizeof(ulong); bt->stackbase = ms->hwintrstack[bt->tc->processor] - ms->hwstacksize + STACK_FRAME_OVERHEAD; bt->stackbuf = ms->hwstackbuf; alter_stackbuf(bt); } else { if (CRASHDEBUG(1)) { fprintf(fp, "cannot find the stack info.\n"); } return; } } while (INSTACK(req->sp, bt)) { newsp = *(ulong *)&bt->stackbuf[req->sp - bt->stackbase]; if ((req->name = closest_symbol(req->pc)) == NULL) { if (CRASHDEBUG(1)) { error(FATAL, "ppc64_back_trace hit unknown symbol (%lx).\n", req->pc); } } bt->flags |= BT_SAVE_LASTSP; ppc64_print_stack_entry(frame, req, newsp, lr, bt); bt->flags &= ~(ulonglong)BT_SAVE_LASTSP; lr = 0; if (IS_KVADDR(newsp)) { /* * In 2.4, HW interrupt stack will be used to save * smp_call_functions symbols. 
i.e, when the dumping * CPU is issued IPI call to freeze other CPUS, */ if (INSTACK(newsp, bt) && (newsp + 16 > bt->stacktop)) newsp = *(ulong *)&bt->stackbuf[newsp - bt->stackbase]; if (!INSTACK(newsp, bt)) { /* * Switch HW interrupt stack to process's stack. */ bt->stackbase = GET_STACKBASE(bt->task); bt->stacktop = GET_STACKTOP(bt->task); alter_stackbuf(bt); } if (IS_KVADDR(newsp) && INSTACK(newsp, bt)) newpc = *(ulong *)&bt->stackbuf[newsp + 16 - bt->stackbase]; } if (BT_REFERENCE_FOUND(bt)) return; eframe_found = FALSE; /* * Is this frame an execption one? * In 2.6, 0x7265677368657265 is saved and used * to determine the execption frame. */ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) { if (frame && (newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc64_pt_regs)) eframe_found = TRUE; else if (STREQ(req->name, ".ret_from_except")) eframe_found = TRUE; } else if ((newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc64_pt_regs)) { readmem(req->sp+0x60, KVADDR, &marker, sizeof(ulong), "stack frame", FAULT_ON_ERROR); if (marker == EXCP_FRAME_MARKER) eframe_found = TRUE; } if (eframe_found) { char *efrm_str = NULL; struct ppc64_pt_regs regs; readmem(req->sp+STACK_FRAME_OVERHEAD, KVADDR, ®s, sizeof(struct ppc64_pt_regs), "exception frame", FAULT_ON_ERROR); efrm_str = ppc64_check_eframe(®s); if (efrm_str) { ppc64_print_eframe(efrm_str, ®s, bt); lr = regs.link; newpc = regs.nip; newsp = regs.gpr[1]; } } /* * Some Linux 3.7 kernel threads have been seen to have * their end-of-trace stack linkage pointer pointing * back to itself (instead of NULL), which would cause * an infinite loop at the .ret_from_kernel_thread frame. 
*/ if (req->sp == newsp) break; req->pc = newpc; req->sp = newsp; frame++; } } static void ppc64_display_full_frame(struct bt_info *bt, ulong nextsp, FILE *ofp) { int i, u_idx; ulong *nip; ulong words, addr; char buf[BUFSIZE]; if (!INSTACK(nextsp, bt)) nextsp = bt->stacktop; words = (nextsp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); nip = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *nip, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * print one entry of a stack trace */ static void ppc64_print_stack_entry(int frame, struct gnu_request *req, ulong newsp, ulong lr, struct bt_info *bt) { struct load_module *lm; char *lrname = NULL; ulong offset; struct syment *sp; char *name_plus_offset; char buf[BUFSIZE]; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(req->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (bt->ref->hexval == req->pc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(req->pc, &offset); if (sp && offset) name_plus_offset = value_to_symstr(req->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%lx] %s at %lx", frame < 10 ? " " : "", frame, req->sp, name_plus_offset ? name_plus_offset : req->name, req->pc); if (module_symbol(req->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); if (req->ra) { /* * Previous frame is an exception one. If the func * symbol for the current frame is same as with * the previous frame's LR value, print "(unreliable)". 
*/ lrname = closest_symbol(req->ra); req->ra = 0; if (!lrname) { if (CRASHDEBUG(1)) error(FATAL, "ppc64_back_trace hit unknown symbol (%lx).\n", req->ra); return; } } if (lr) { /* * Link register value for an expection frame. */ if ((lrname = closest_symbol(lr)) == NULL) { if (CRASHDEBUG(1)) error(FATAL, "ppc64_back_trace hit unknown symbol (%lx).\n", lr); return; } if (req->pc != lr) { fprintf(fp, "\n%s[Link Register] ", frame < 10 ? " " : ""); fprintf(fp, "[%lx] %s at %lx", req->sp, lrname, lr); } req->ra = lr; } if (!req->name || STREQ(req->name,lrname)) fprintf(fp, " (unreliable)"); fprintf(fp, "\n"); } if (bt->flags & BT_SAVE_LASTSP) req->lastsp = req->sp; bt->frameptr = req->sp; if (bt->flags & BT_FULL) if (IS_KVADDR(newsp)) ppc64_display_full_frame(bt, newsp, fp); if (bt->flags & BT_LINE_NUMBERS) ppc64_dump_line_number(req->pc); } /* * Check whether the frame is exception one! */ static char * ppc64_check_eframe(struct ppc64_pt_regs *regs) { switch(regs->trap & ~0xF) { case 0x100: return("System Reset"); case 0x200: return("Machine Check"); case 0x300: return("Data Access"); case 0x380: return("Data SLB Access"); case 0x400: return("Instruction Access"); case 0x480: return("Instruction SLB Access"); case 0x500: return("Hardware Interrupt"); case 0x600: return("Alignment"); case 0x700: return("Program Check"); case 0x800: return("FPU Unavailable"); case 0x900: return("Decrementer"); case 0x980: return("Hypervisor Decrementer"); case 0xa00: return("Doorbell"); case 0xb00: return("reserved"); case 0xc00: return("System Call"); case 0xd00: return("Single Step"); case 0xe00: return("fp assist"); case 0xe40: return("Emulation Assist"); case 0xe60: return("HMI"); case 0xe80: return("Hypervisor Doorbell"); case 0xf00: return("Performance Monitor"); case 0xf20: return("Altivec Unavailable"); case 0x1300: return("Instruction Breakpoint"); case 0x1500: return("Denormalisation"); case 0x1700: return("Altivec Assist"); } /* No exception frame exists */ return NULL; } 
static void ppc64_print_regs(struct ppc64_pt_regs *regs) { int i; /* print out the gprs... */ for (i=0; i<32; i++) { if (i && !(i % 3)) fprintf(fp, "\n"); fprintf(fp, " R%d:%s %016lx ", i, ((i < 10) ? " " : ""), regs->gpr[i]); /* * In 2.6, some stack frame contains only partial regs set. * For the partial set, only 14 regs will be saved and trap * field will contain 1 in the least significant bit. */ if ((i == 13) && (regs->trap & 1)) break; } fprintf(fp, "\n"); /* print out the rest of the registers */ fprintf(fp, " NIP: %016lx ", regs->nip); fprintf(fp, " MSR: %016lx ", regs->msr); fprintf(fp, "OR3: %016lx\n", regs->orig_gpr3); fprintf(fp, " CTR: %016lx ", regs->ctr); fprintf(fp, " LR: %016lx ", regs->link); fprintf(fp, "XER: %016lx\n", regs->xer); fprintf(fp, " CCR: %016lx ", regs->ccr); fprintf(fp, " MQ: %016lx ", regs->mq); fprintf(fp, "DAR: %016lx\n", regs->dar); fprintf(fp, " DSISR: %016lx ", regs->dsisr); fprintf(fp, " Syscall Result: %016lx\n", regs->result); } /* * Print the exception frame information */ static void ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs, struct bt_info *bt) { if (BT_REFERENCE_CHECK(bt)) return; fprintf(fp, " %s [%lx] exception frame:\n", efrm_str, regs->trap); ppc64_print_regs(regs); } /* * get SP and IP from the saved ptregs. */ static int ppc64_kdump_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) { struct ppc64_pt_regs *pt_regs; unsigned long unip; pt_regs = (struct ppc64_pt_regs *)bt_in->machdep; if (!pt_regs || !pt_regs->gpr[1]) { /* * Not collected regs. May be the corresponding CPU not * responded to an IPI. 
*/ fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n", bt_in->task); return FALSE; } *ksp = pt_regs->gpr[1]; if (IS_KVADDR(*ksp)) { readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value", FAULT_ON_ERROR); *nip = unip; } else { if (IN_TASK_VMA(bt_in->task, *ksp)) fprintf(fp, "%0lx: Task is running in user space\n", bt_in->task); else fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", bt_in->task, *ksp); *nip = pt_regs->nip; } if (bt_in->flags && ((BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT))) return TRUE; /* * Print the collected regs for the active task */ ppc64_print_regs(pt_regs); if (!IS_KVADDR(*ksp)) return FALSE; fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip, closest_symbol(pt_regs->nip)); if (unip != pt_regs->link) fprintf(fp, " LR [%016lx] %s\n", pt_regs->link, closest_symbol(pt_regs->link)); return TRUE; } /* * Get the starting point for the active cpus in a diskdump/netdump. */ static int ppc64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) { int panic_task; int i; char *sym; ulong *up; struct bt_info bt_local, *bt; struct machine_specific *ms; ulong ur_nip = 0; ulong ur_ksp = 0; int check_hardirq, check_softirq; int check_intrstack = TRUE; struct ppc64_pt_regs *pt_regs; struct syment *sp; /* * For the kdump vmcore, Use SP and IP values that are saved in ptregs. */ if (pc->flags & KDUMP) return ppc64_kdump_stack_frame(bt_in, nip, ksp); bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); ms = machdep->machspec; ur_nip = ur_ksp = 0; panic_task = tt->panic_task == bt->task ? TRUE : FALSE; check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE; if (panic_task && bt->machdep) { pt_regs = (struct ppc64_pt_regs *)bt->machdep; ur_nip = pt_regs->nip; ur_ksp = pt_regs->gpr[1]; } else if (bt->task != tt->panic_task) { char cpu_frozen = FALSE; /* * Determine whether the CPU responded to an IPI. * We captured the GPR1 register value in the * platform_freeze_cpu() function. 
 */
		if ((sp = symbol_search("dump_header")) &&
		    !is_symbol_text(sp)) {
			/* Diskdump */
			ulong task_addr;
			/*
			 * The dump_header struct is specified in the module.
			 */
			ulong offset = roundup(STRUCT_SIZE("timespec") +
				STRUCT_SIZE("new_utsname") + 52, 8);
			offset += sizeof(ulong) * bt->tc->processor;
			readmem(symbol_value("dump_header") + offset,
				KVADDR, &task_addr, sizeof(ulong),
				"Task Address", FAULT_ON_ERROR);
			if (task_addr)
				cpu_frozen = TRUE;
		}
		if (!cpu_frozen && symbol_exists("cpus_frozen")) {
			/* Netdump */
			readmem(symbol_value("cpus_frozen") +
				sizeof(char) * bt->tc->processor, KVADDR,
				&cpu_frozen, sizeof(char),
				"CPU Frozen Value", FAULT_ON_ERROR);
		}
		ur_ksp = ppc64_get_sp(bt->task);
		if (IS_KVADDR(ur_ksp)) {
			/*
			 * Since we could not capture the NIP value, we do not
			 * know the top symbol name. Hence, move the SP to next
			 * frame.
			 */
			if (cpu_frozen)
				readmem(ur_ksp, KVADDR, &ur_ksp, sizeof(ulong),
					"Stack Pointer", FAULT_ON_ERROR);
			else if (symbol_exists("platform_freeze_cpu"))
				fprintf(fp,
				"%0lx: GPR1 register value (SP) was not saved\n",
					bt->task);
			if (IS_KVADDR(ur_ksp))
				/*
				 * Get the LR value stored in the stack frame
				 * (LR save word is at SP+16).
				 */
				readmem(ur_ksp+16, KVADDR, &ur_nip,
					sizeof(ulong), "Regs NIP value",
					FAULT_ON_ERROR);
			*ksp = ur_ksp;
			*nip = ur_nip;
		} else {
			*ksp = ur_ksp;
			fprintf(fp, "Could not find SP for task %0lx\n",
				bt->task);
		}
	}

	/*
	 * Check the process stack first. We are scanning stack for only
	 * panic task. Even though we have dumping CPU's regs, we will be
	 * looking for specific symbols to display trace from actual dump
	 * functions. If these symbols do not exist, consider the regs
	 * stored in the ELF header.
	 */
retry:
	/* scan the cached stack for a known dump/crash entry-point symbol */
	for (i = 0, up = (ulong *)bt->stackbuf;
	     i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) {
		sym = closest_symbol(*up);
		if (STREQ(sym, ".netconsole_netdump") ||
		    STREQ(sym, ".netpoll_start_netdump") ||
		    STREQ(sym, ".start_disk_dump") ||
		    STREQ(sym, "crash_kexec") ||
		    STREQ(sym, "crash_fadump") ||
		    STREQ(sym, "crash_ipi_callback") ||
		    STREQ(sym, ".crash_kexec") ||
		    STREQ(sym, ".crash_fadump") ||
		    STREQ(sym, ".crash_ipi_callback") ||
		    STREQ(sym, ".disk_dump")) {
			*nip = *up;
			/* SP is the word 16 bytes below the found LR slot */
			*ksp = bt->stackbase +
				((char *)(up) - 16 - bt->stackbuf);
			return TRUE;
		}
	}

	bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ);
	/* not found on the process stack: retry on the hard-IRQ stack... */
	if (check_hardirq &&
	    (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) {
		bt->stackbase = tt->hardirq_ctx[bt->tc->processor];
		bt->stacktop = bt->stackbase + STACKSIZE();
		alter_stackbuf(bt);
		bt->flags |= BT_HARDIRQ;
		check_hardirq = FALSE;
		goto retry;
	}
	/* ...then the soft-IRQ stack... */
	if (check_softirq &&
	    (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) {
		bt->stackbase = tt->softirq_ctx[bt->tc->processor];
		bt->stacktop = bt->stackbase + STACKSIZE();
		alter_stackbuf(bt);
		bt->flags |= BT_SOFTIRQ;
		check_softirq = FALSE;
		goto retry;
	}
	/* ...then the hardware interrupt stack, if one was captured */
	if (check_intrstack && ms->hwintrstack[bt->tc->processor]) {
		bt->stacktop = ms->hwintrstack[bt->tc->processor] +
			sizeof(ulong);
		bt->stackbase = ms->hwintrstack[bt->tc->processor] -
			ms->hwstacksize + STACK_FRAME_OVERHEAD;
		bt->stackbuf = ms->hwstackbuf;
		alter_stackbuf(bt);
		check_intrstack = FALSE;
		goto retry;
	}
	/*
	 * We didn't find what we were looking for, so just use what was
	 * passed in the ELF header.
	 */
	if (ur_nip && ur_ksp) {
		*nip = ur_nip;
		*ksp = ur_ksp;
		return TRUE;
	}

	console("ppc64_get_dumpfile_stack_frame: cannot find SP for panic task\n");
	return FALSE;
}

/*
 * Get a stack frame combination of pc and ra from the most relevant spot.
 */
static void
ppc64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	ulong ksp, nip;

	nip = ksp = 0;

	if (DUMPFILE() && is_task_active(bt->task))
		ppc64_get_dumpfile_stack_frame(bt, &nip, &ksp);
	else
		get_ppc64_frame(bt, &nip, &ksp);

	if (pcp)
		*pcp = nip;
	if (spp)
		*spp = ksp;
}

/*
 * Read the saved kernel stack pointer from the task, handling both the
 * THREAD_INFO layout and the older tss-based task_struct layout.
 */
static ulong
ppc64_get_sp(ulong task)
{
	ulong sp;

	if (tt->flags & THREAD_INFO)
		readmem(task + OFFSET(task_struct_thread_ksp), KVADDR,
			&sp, sizeof(void *),
			"thread_struct ksp", FAULT_ON_ERROR);
	else {
		ulong offset;
		offset = OFFSET_OPTION(task_struct_thread_ksp,
			task_struct_tss_ksp);
		readmem(task + offset, KVADDR, &sp, sizeof(void *),
			"task_struct ksp", FAULT_ON_ERROR);
	}
	return sp;
}

/*
 * get the SP and PC values for idle tasks.
 */
static void
get_ppc64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	ulong ip;
	ulong sp;
	ulong *stack;
	ulong task;
	char *closest;
	struct ppc64_pt_regs regs;

	ip = 0;
	task = bt->task;
	stack = (ulong *)bt->stackbuf;

	sp = ppc64_get_sp(task);
	if (!INSTACK(sp, bt))
		goto out;
	readmem(sp+STACK_FRAME_OVERHEAD, KVADDR, &regs,
		sizeof(struct ppc64_pt_regs),
		"PPC64 pt_regs", FAULT_ON_ERROR);
	ip = regs.nip;
	closest = closest_symbol(ip);
	if (STREQ(closest, ".__switch_to") || STREQ(closest, "__switch_to")) {
		/* NOTE: _switch_to() calls _switch() which
		 * is asm. _switch leaves pc == lr.
		 * Working through this frame is tricky,
		 * and this mess isn't going to help if we
		 * actually dumped here. Most likely the
		 * analyzer is trying to backtrace a task.
		 * Need to skip 2 frames.
		 */
		sp = stack[(sp - bt->stackbase)/sizeof(ulong)];
		if (!INSTACK(sp, bt))
			goto out;
		sp = stack[(sp - bt->stackbase)/sizeof(ulong)];
		if (!INSTACK(sp+16, bt))
			goto out;
		ip = stack[(sp + 16 - bt->stackbase)/sizeof(ulong)];
	}
out:
	*getsp = sp;
	*getpc = ip;
}

/*
 * Do the work for cmd_irq().
 */
static void
ppc64_dump_irq(int irq)
{
	ulong irq_desc_addr, addr;
	int level, others;
	ulong action, ctl, value;
	char typename[32];

	/* locate this IRQ's descriptor within the irq_desc[] array */
	irq_desc_addr = symbol_value("irq_desc") + (SIZE(irqdesc) * irq);

	readmem(irq_desc_addr + OFFSET(irqdesc_level), KVADDR, &level,
		sizeof(int), "irq_desc entry", FAULT_ON_ERROR);
	readmem(irq_desc_addr + OFFSET(irqdesc_action), KVADDR, &action,
		sizeof(long), "irq_desc entry", FAULT_ON_ERROR);
	readmem(irq_desc_addr + OFFSET(irqdesc_ctl), KVADDR, &ctl,
		sizeof(long), "irq_desc entry", FAULT_ON_ERROR);

	fprintf(fp, " IRQ: %d\n", irq);
	fprintf(fp, " STATUS: 0\n");
	fprintf(fp, "HANDLER: ");

	if (value_symbol(ctl)) {
		fprintf(fp, "%lx ", ctl);
		pad_line(fp, VADDR_PRLEN == 8 ?
			VADDR_PRLEN+2 : VADDR_PRLEN-6, ' ');
		fprintf(fp, "<%s>\n", value_symbol(ctl));
	} else
		fprintf(fp, "%lx\n", ctl);

	if(ctl) {
		/* typename */
		readmem(ctl + OFFSET(hw_interrupt_type_typename), KVADDR,
			&addr, sizeof(ulong), "typename pointer",
			FAULT_ON_ERROR);
		fprintf(fp, " typename: %08lx ", addr);
		if (read_string(addr, typename, 32))
			fprintf(fp, "\"%s\"\n", typename);
		else
			fprintf(fp, "\n");

		/* startup...I think this is always 0 */
		readmem(ctl + OFFSET(hw_interrupt_type_startup), KVADDR,
			&addr, sizeof(ulong), "interrupt startup",
			FAULT_ON_ERROR);
		fprintf(fp, " startup: ");
		if(value_symbol(addr)) {
			fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr));
		} else
			fprintf(fp, "%lx\n", addr);

		/* shutdown...I think this is always 0 */
		readmem(ctl + OFFSET(hw_interrupt_type_shutdown), KVADDR,
			&addr, sizeof(ulong), "interrupt shutdown",
			FAULT_ON_ERROR);
		fprintf(fp, " shutdown: ");
		if(value_symbol(addr)) {
			fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr));
		} else
			fprintf(fp, "%lx\n", addr);

		if (VALID_MEMBER(hw_interrupt_type_handle)) {
			/* handle */
			readmem(ctl + OFFSET(hw_interrupt_type_handle),
				KVADDR, &addr, sizeof(ulong),
				"interrupt handle", FAULT_ON_ERROR);
			fprintf(fp, " handle: ");
			if(value_symbol(addr)) {
				fprintf(fp, "%08lx <%s>\n", addr,
					value_symbol(addr));
			} else
				fprintf(fp, "%lx\n", addr);
		}

		/* enable/disable */
		readmem(ctl + OFFSET(hw_interrupt_type_enable), KVADDR,
			&addr, sizeof(ulong), "interrupt enable",
			FAULT_ON_ERROR);
		fprintf(fp, " enable: ");
		if(value_symbol(addr)) {
			fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr));
		} else
			fprintf(fp, "%lx\n", addr);

		readmem(ctl + OFFSET(hw_interrupt_type_disable), KVADDR,
			&addr, sizeof(ulong), "interrupt disable",
			FAULT_ON_ERROR);
		fprintf(fp, " disable: ");
		if(value_symbol(addr)) {
			fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr));
		} else
			fprintf(fp, "0\n");
	}

	/* next, the action... and its submembers */
	if(!action)
		fprintf(fp, " ACTION: (none)\n");

	while(action) {
		fprintf(fp, " ACTION: %08lx\n", action);

		/* handler */
		readmem(action + OFFSET(irqaction_handler), KVADDR,
			&addr, sizeof(ulong), "action handler",
			FAULT_ON_ERROR);
		fprintf(fp, " handler: ");
		if(value_symbol(addr)) {
			fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr));
		} else
			fprintf(fp, "0\n");

		/* flags */
		readmem(action + OFFSET(irqaction_flags), KVADDR,
			&value, sizeof(ulong), "action flags",
			FAULT_ON_ERROR);
		fprintf(fp, " flags: %lx ", value);
		if (value) {
			others = 0;
			fprintf(fp, "(");
			if (value & SA_INTERRUPT)
				fprintf(fp,
					"%sSA_INTERRUPT",
					others++ ? "|" : "");
			if (value & SA_PROBE)
				fprintf(fp,
					"%sSA_PROBE",
					others++ ? "|" : "");
			if (value & SA_SAMPLE_RANDOM)
				fprintf(fp,
					"%sSA_SAMPLE_RANDOM",
					others++ ? "|" : "");
			if (value & SA_SHIRQ)
				fprintf(fp,
					"%sSA_SHIRQ",
					others++ ? "|" : "");
			fprintf(fp, ")");
			if (value & ~ACTION_FLAGS) {
				fprintf(fp,
					" (bits %lx not translated)",
					value & ~ACTION_FLAGS);
			}
		}

		fprintf(fp, "\n");

		/* mask */
		readmem(action + OFFSET(irqaction_mask), KVADDR,
			&value, sizeof(ulong), "action mask",
			FAULT_ON_ERROR);
		fprintf(fp, " mask: %lx\n", value);

		/* name */
		readmem(action + OFFSET(irqaction_name), KVADDR,
			&addr, sizeof(ulong), "action name",
			FAULT_ON_ERROR);
		fprintf(fp, " name: %08lx ", addr);
		if (read_string(addr, typename, 32))
			fprintf(fp, "\"%s\"\n", typename);
		else
			fprintf(fp, "\n");

		/* dev_id */
		readmem(action + OFFSET(irqaction_dev_id), KVADDR,
			&value, sizeof(ulong), "action dev_id",
			FAULT_ON_ERROR);
		fprintf(fp, " dev_id: %08lx\n", value);

		/* next */
		readmem(action + OFFSET(irqaction_next), KVADDR,
			&value, sizeof(ulong), "action next",
			FAULT_ON_ERROR);
		fprintf(fp, " next: %lx\n", value);

		/* keep going if there are chained interrupts */
		action = value;
	}
	fprintf(fp, " DEPTH: %x\n\n", level);
}

/*
 * Filter disassembly output if the output radix is not gdb's default 10
 */
static int
ppc64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;
	/*
	 * For some reason gdb can go off into the weeds translating text addresses,
	 * (on alpha -- not necessarily seen on ppc64) so this routine both fixes the
	 * references as well as imposing the current output radix on the translations.
	 */
	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	if (colon) {
		/* rewrite the leading "addr:" with "0xaddr <symbol>" */
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		/* re-translate the trailing "<symbol>" target operand */
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) &&
		       !(STRNEQ(p1, " 0x") || STRNEQ(p1, ",0x")))
			p1--;

		if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, ",0x")))
			return FALSE;
		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));

		sprintf(p1, "%s", buf1);
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
ppc64_get_smp_cpus(void)
{
	return get_cpus_online();
}

/*
 * Machine dependent command.
 */
void
ppc64_cmd_mach(void)
{
	int c;

	while ((c = getopt(argcnt, args, "cm")) != EOF) {
		switch(c) {
		case 'c':
		case 'm':
			fprintf(fp, "PPC64: '-%c' option is not supported\n",
				c);
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	ppc64_display_machine_stats();
}

/*
 * "mach" command output.
*/ static void ppc64_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (tt->flags & IRQSTACKS) { fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } } static const char *hook_files[] = { "arch/ppc64/kernel/entry.S", "arch/ppc64/kernel/head.S", "arch/ppc64/kernel/semaphore.c" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define SEMAPHORE_C ((char **)&hook_files[2]) static struct line_number_hook ppc64_line_number_hooks[] = { {"DoSyscall", ENTRY_S}, {"_switch", ENTRY_S}, {"ret_from_syscall_1", ENTRY_S}, {"ret_from_syscall_2", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"ret_from_except", ENTRY_S}, {"do_signal_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"enter_rtas", ENTRY_S}, {"restore", ENTRY_S}, {"do_bottom_half_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"_stext", HEAD_S}, {"_start", HEAD_S}, {"__start", HEAD_S}, {"__secondary_hold", 
	 HEAD_S},
	{"DataAccessCont", HEAD_S},
	{"DataAccess", HEAD_S},
	{"i0x300", HEAD_S},
	{"DataSegmentCont", HEAD_S},
	{"InstructionAccessCont", HEAD_S},
	{"InstructionAccess", HEAD_S},
	{"i0x400", HEAD_S},
	{"InstructionSegmentCont", HEAD_S},
	{"HardwareInterrupt", HEAD_S},
	{"do_IRQ_intercept", HEAD_S},
	{"i0x600", HEAD_S},
	{"ProgramCheck", HEAD_S},
	{"i0x700", HEAD_S},
	{"FPUnavailable", HEAD_S},
	{"i0x800", HEAD_S},
	{"Decrementer", HEAD_S},
	{"timer_interrupt_intercept", HEAD_S},
	{"SystemCall", HEAD_S},
	{"trap_0f_cont", HEAD_S},
	{"Trap_0f", HEAD_S},
	{"InstructionTLBMiss", HEAD_S},
	{"InstructionAddressInvalid", HEAD_S},
	{"DataLoadTLBMiss", HEAD_S},
	{"DataAddressInvalid", HEAD_S},
	{"DataStoreTLBMiss", HEAD_S},
	{"AltiVecUnavailable", HEAD_S},
	{"DataAccess", HEAD_S},
	{"InstructionAccess", HEAD_S},
	{"DataSegment", HEAD_S},
	{"InstructionSegment", HEAD_S},
	{"transfer_to_handler", HEAD_S},
	{"stack_ovf", HEAD_S},
	{"load_up_fpu", HEAD_S},
	{"KernelFP", HEAD_S},
	{"load_up_altivec", HEAD_S},
	{"KernelAltiVec", HEAD_S},
	{"giveup_altivec", HEAD_S},
	{"giveup_fpu", HEAD_S},
	{"relocate_kernel", HEAD_S},
	{"copy_and_flush", HEAD_S},
	{"fix_mem_constants", HEAD_S},
	{"apus_interrupt_entry", HEAD_S},
	{"__secondary_start_gemini", HEAD_S},
	{"__secondary_start_psurge", HEAD_S},
	{"__secondary_start_psurge2", HEAD_S},
	{"__secondary_start_psurge3", HEAD_S},
	{"__secondary_start_psurge99", HEAD_S},
	{"__secondary_start", HEAD_S},
	{"setup_common_caches", HEAD_S},
	{"setup_604_hid0", HEAD_S},
	{"setup_750_7400_hid0", HEAD_S},
	{"load_up_mmu", HEAD_S},
	{"start_here", HEAD_S},
	{"clear_bats", HEAD_S},
	{"flush_tlbs", HEAD_S},
	{"mmu_off", HEAD_S},
	{"initial_bats", HEAD_S},
	{"setup_disp_bat", HEAD_S},
	{"m8260_gorom", HEAD_S},
	{"sdata", HEAD_S},
	{"empty_zero_page", HEAD_S},
	{"swapper_pg_dir", HEAD_S},
	{"cmd_line", HEAD_S},
	{"intercept_table", HEAD_S},
	{"set_context", HEAD_S},
	{NULL, NULL}    /* list must be NULL-terminated */
};

/*
 * Print the file-and-line-number location for callpc; if no line
 * number is found for the exact address, retry once using the value
 * of the closest preceding symbol.
 */
static void
ppc64_dump_line_number(ulong callpc)
{
	int retries;
	char buf[BUFSIZE], *p;

	retries = 0;
try_closest:
	get_line_number(callpc, buf, FALSE);

	if (strlen(buf)) {
		if (retries) {
			/* drop the ": <lineno>" suffix on the retried lookup */
			p = strstr(buf, ": ");
			if (p)
				*p = NULLCHAR;
		}
		fprintf(fp, " %s\n", buf);
	} else {
		if (retries)
			fprintf(fp, GDB_PATCHED() ?
				"" : " (cannot determine file and line number)\n");
		else {
			retries++;
			callpc = closest_symbol_value(callpc);
			goto try_closest;
		}
	}
}

/*
 * Dummy references to otherwise-unused items, solely to quiet
 * compiler warnings; never actually called.
 */
void
ppc64_compiler_warning_stub(void)
{
	struct line_number_hook *lhp;

	lhp = &ppc64_line_number_hooks[0];
	lhp++;
	ppc64_back_trace(NULL, NULL);
	ppc64_dump_line_number(0);
}

/*
 * Force the VM address-range selection via:
 *
 *  --machdep vm=orig
 *  --machdep vm=2.6.14
 */
void
parse_cmdline_args(void)
{
	int index, i, c;
	char *p;
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int lines = 0;

	for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
		if (!machdep->cmdline_args[index])
			break;

		if (!strstr(machdep->cmdline_args[index], "=")) {
			error(WARNING, "ignoring --machdep option: %s\n\n",
				machdep->cmdline_args[index]);
			continue;
		}

		strcpy(buf, machdep->cmdline_args[index]);

		/* split comma-separated suboptions into words */
		for (p = buf; *p; p++) {
			if (*p == ',')
				*p = ' ';
		}

		c = parse_line(buf, arglist);

		for (i = 0; i < c; i++) {
			if (STRNEQ(arglist[i], "vm=")) {
				p = arglist[i] + strlen("vm=");
				if (strlen(p)) {
					if (STREQ(p, "orig")) {
						machdep->flags |= VM_ORIG;
						continue;
					} else if (STREQ(p, "2.6.14")) {
						machdep->flags |= VM_4_LEVEL;
						continue;
					}
				}
			}
			error(WARNING, "ignoring --machdep option: %s\n",
				arglist[i]);
			lines++;
		}

		switch (machdep->flags & (VM_ORIG|VM_4_LEVEL)) {
		case VM_ORIG:
			error(NOTE, "using original PPC64 VM address ranges\n");
			lines++;
			break;

		case VM_4_LEVEL:
			error(NOTE, "using 4-level pagetable PPC64 VM address ranges\n");
			lines++;
			break;

		case (VM_ORIG|VM_4_LEVEL):
			error(WARNING, "cannot set both vm=orig and vm=2.6.14\n");
			lines++;
			machdep->flags &= ~(VM_ORIG|VM_4_LEVEL);
			break;
		}

		if (lines)
			fprintf(fp, "\n");
	}
}

/*
 * Initialize the per cpu data_offset values from paca structure.
 */
static int
ppc64_paca_init(int map)
{
	int i, cpus, nr_paca;
	char *cpu_paca_buf;
	ulong data_offset;
	ulong paca;

	if (!symbol_exists("paca"))
		error(FATAL, "PPC64: Could not find 'paca' symbol\n");

	/*
	 * In v2.6.34 ppc64, the upstream commit 1426d5a3 (powerpc: Dynamically
	 * allocate pacas) now dynamically allocates the paca and have
	 * changed data type of 'paca' symbol from array to pointer. With this
	 * change in place crash utility fails to read vmcore generated for
	 * upstream kernel.
	 * Add a check for paca variable data type before accessing.
	 */
	if (get_symbol_type("paca", NULL, NULL) == TYPE_CODE_PTR)
		readmem(symbol_value("paca"), KVADDR, &paca, sizeof(ulong),
			"paca", FAULT_ON_ERROR);
	else
		paca = symbol_value("paca");

	if (!MEMBER_EXISTS("paca_struct", "data_offset"))
		return kt->cpus;

	STRUCT_SIZE_INIT(ppc64_paca, "paca_struct");
	data_offset = MEMBER_OFFSET("paca_struct", "data_offset");

	/* NOTE(review): cpu_paca_buf is never FREEBUF()'d on any path --
	 * looks like a buffer leak; confirm against GETBUF conventions. */
	cpu_paca_buf = GETBUF(SIZE(ppc64_paca));

	if (!(nr_paca = get_array_length("paca", NULL, 0)))
		nr_paca = (kt->kernel_NR_CPUS ?
			kt->kernel_NR_CPUS : NR_CPUS);

	if (nr_paca > NR_CPUS) {
		error(WARNING,
			"PPC64: Number of paca entries (%d) greater than NR_CPUS (%d)\n",
			nr_paca, NR_CPUS);
		error(FATAL, "Recompile crash with larger NR_CPUS\n");
	}

	for (i = cpus = 0; i < nr_paca; i++) {
		/*
		 * CPU present or online or can exist in the system(possible)?
		 */
		if (!in_cpu_map(map, i))
			continue;
		readmem(paca + (i * SIZE(ppc64_paca)), KVADDR,
			cpu_paca_buf, SIZE(ppc64_paca),
			"paca entry", FAULT_ON_ERROR);
		kt->__per_cpu_offset[i] = ULONG(cpu_paca_buf + data_offset);
		kt->flags |= PER_CPU_OFF;
		cpus++;
	}
	return cpus;
}

/*
 * Select which cpu map symbol is available, in decreasing order of
 * preference: possible, present, online, active.  Fatal if none exist.
 */
static int
ppc64_get_cpu_map(void)
{
	int map;

	if (cpu_map_addr("possible"))
		map = POSSIBLE_MAP;
	else if (cpu_map_addr("present"))
		map = PRESENT_MAP;
	else if (cpu_map_addr("online"))
		map = ONLINE_MAP;
	else if (cpu_map_addr("active"))
		map = ACTIVE_MAP;
	else {
		map = 0;
		error(FATAL, "PPC64: cannot find 'cpu_possible_map', "
			"'cpu_present_map', 'cpu_online_map' or 'cpu_active_map' symbols\n");
	}
	return map;
}

/*
 * Updating any smp-related items that were possibly bypassed
 * or improperly initialized in kernel_init().
 */
static void
ppc64_init_cpu_info(void)
{
	int i, map, cpus, nr_cpus;

	map = ppc64_get_cpu_map();
	/*
	 * starting from v2.6.36 we can not rely on paca structure to get
	 * per cpu data_offset. The upstream commit fc53b420 overwrites
	 * the paca pointer variable to point to static paca that contains
	 * valid data_offset only for crashing cpu.
	 *
	 * But the kernel v2.6.36 ppc64 introduces __per_cpu_offset symbol
	 * which was removed post v2.6.15 ppc64 and now we get the per cpu
	 * data_offset from __per_cpu_offset symbol during kernel_init()
	 * call. Hence for backward (pre-2.6.36) compatibility, call
	 * ppc64_paca_init() only if symbol __per_cpu_offset does not exist.
	 */
	if (!symbol_exists("__per_cpu_offset"))
		cpus = ppc64_paca_init(map);
	else {
		if (!(nr_cpus = get_array_length("__per_cpu_offset", NULL, 0)))
			nr_cpus = (kt->kernel_NR_CPUS ?
				kt->kernel_NR_CPUS : NR_CPUS);
		for (i = cpus = 0; i < nr_cpus; i++) {
			if (!in_cpu_map(map, i))
				continue;
			cpus++;
		}
	}
	switch (map) {
	case POSSIBLE_MAP:
		if (cpus > kt->cpus) {
			i = get_highest_cpu_online() + 1;
			if (i > kt->cpus)
				kt->cpus = i;
		}
		break;
	case ONLINE_MAP:
	case PRESENT_MAP:
		kt->cpus = cpus;
		break;
	}
	if (kt->cpus > 1)
		kt->flags |= SMP;
}

/*
 * Invalidate the cached top-level pagetable read when the kernel pgd
 * pointer has changed.
 */
void
ppc64_clear_machdep_cache(void)
{
	if (machdep->machspec->last_level4_read != vt->kernel_pgd[0])
		machdep->machspec->last_level4_read = 0;
}

/*
 * Fill vrp[] with the unity-map, vmalloc and (if applicable) vmemmap
 * kernel virtual address ranges; returns the number of entries stored.
 */
static int
ppc64_get_kvaddr_ranges(struct vaddr_range *vrp)
{
	int cnt;
	physaddr_t phys1, phys2;
	ulong pp1, pp2;

	cnt = 0;

	vrp[cnt].type = KVADDR_UNITY_MAP;
	vrp[cnt].start = machdep->kvbase;
	vrp[cnt++].end = vt->high_memory;

	vrp[cnt].type = KVADDR_VMALLOC;
	vrp[cnt].start = first_vmalloc_address();
	vrp[cnt++].end = last_vmalloc_address();

	if (machdep->flags & VMEMMAP) {
		phys1 = (physaddr_t)(0);
		phys2 = (physaddr_t)VTOP((vt->high_memory - PAGESIZE()));
		if (phys_to_page(phys1, &pp1) &&
		    phys_to_page(phys2, &pp2)) {
			vrp[cnt].type = KVADDR_VMEMMAP;
			vrp[cnt].start = pp1;
			vrp[cnt++].end = pp2;
		}
	}
	return cnt;
}
#endif /* PPC64 */
crash-7.1.4/.rh_rpm_package0000666000000000000000000000000612634305150014263 0ustar rootroot7.1.4
crash-7.1.4/vmware_vmss.h0000664000000000000000000000572212634305150014052 0ustar rootroot/*
 * vmware_vmss.h
 *
 * Copyright (c) 2015 VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Dyno Hongjun Fu
 */

#define CPTDUMP_OLD_MAGIC_NUMBER 0xbed0bed0
#define CPTDUMP_MAGIC_NUMBER 0xbed2bed2
#define CPTDUMP_PARTIAL_MAGIC_NUMBER 0xbed3bed3
#define CPTDUMP_RESTORED_MAGIC_NUMBER 0xbad1bad1
#define CPTDUMP_NORESTORE_MAGIC_NUMBER 0xbad2bad2

/*
 * Poor man's bit fields
 * TAG: | NAMELEN | NINDX | VALSIZE |
 * bits |15 8|7 6|5 0|
 * size | 8 | 2 | 6 |
 */
#define TAG_NAMELEN_MASK 0xFF
#define TAG_NAMELEN_OFFSET 8
#define TAG_NINDX_MASK 0x3
#define TAG_NINDX_OFFSET 6
#define TAG_VALSIZE_MASK 0x3F
#define TAG_VALSIZE_OFFSET 0
#define TAG_SIZE 2

/*
 * The value size has two special values to indicate blocks and compressed
 * blocks.
 */
#define TAG_ISBLOCK TAG_VALSIZE_MASK
#define TAG_ISBLOCK_COMPRESSED (TAG_VALSIZE_MASK-1)

#define MAKE_TAG(_nl, _nidx, _nb) \
	(((_nl) & TAG_NAMELEN_MASK) << TAG_NAMELEN_OFFSET | \
	((_nidx) & TAG_NINDX_MASK) << TAG_NINDX_OFFSET | \
	((_nb) & TAG_VALSIZE_MASK) << TAG_VALSIZE_OFFSET)

#define TAG_NAMELEN(_tag) (((_tag) >> TAG_NAMELEN_OFFSET) & TAG_NAMELEN_MASK)
#define TAG_NINDX(_tag) (((_tag) >> TAG_NINDX_OFFSET) & TAG_NINDX_MASK)
#define TAG_VALSIZE(_tag) (((_tag) >> TAG_VALSIZE_OFFSET) & TAG_VALSIZE_MASK)

#define NULL_TAG MAKE_TAG(0, 0, 0)
#define NO_INDEX (-1)

/*
 * TRUE iff it's a (optionally compressed) block
 */
#define IS_BLOCK_TAG(_tag) (TAG_VALSIZE(_tag) == TAG_ISBLOCK || \
			TAG_VALSIZE(_tag) == TAG_ISBLOCK_COMPRESSED)

/*
 * TRUE iff it's a compressed block.
 */
#define IS_BLOCK_COMPRESSED_TAG(_tag) (TAG_VALSIZE(_tag) == TAG_ISBLOCK_COMPRESSED)

/* fixed header at the front of a VMware checkpoint dump */
struct cptdumpheader {
	uint32_t id;
	uint32_t version;
	uint32_t numgroups;
};
typedef struct cptdumpheader cptdumpheader;

#define MAX_LENGTH 64
/* one per-group descriptor: name plus file position/size of its data */
struct cptgroupdesc {
	char name[MAX_LENGTH];
	uint64_t position;
	uint64_t size;
};
typedef struct cptgroupdesc cptgroupdesc;

struct memregion {
	uint32_t startpagenum;
	uint32_t startppn;
	uint32_t size;
};
typedef struct memregion memregion;

#define MAX_REGIONS 3
/* per-session state for a parsed .vmss file */
struct vmssdata {
	int32_t cpt64bit;
	FILE *dfp;
	/* about the memory */
	uint32_t alignmask;
	uint32_t regionscount;
	memregion regions[MAX_REGIONS];
	uint64_t memoffset;
	uint64_t memsize;
};
typedef struct vmssdata vmssdata;

#define DEBUG_PARSE_PRINT(x) \
	do { \
		if (CRASHDEBUG(1)) { \
			fprintf x; \
		} \
	} while(0)
crash-7.1.4/makedumpfile.h0000664000000000000000000000315512634305150014142 0ustar rootroot/*
 * makedumpfile.h
 *
 * This code is for reading a dumpfile generated by makedumpfile command.
 *
 * Copyright (C) 2011 NEC Soft, Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Ken'ichi Ohmichi
 */

/*
 * makedumpfile header
 * For re-arranging the dump data on different architecture, all the
 * variables are defined by 64bits. The size of signature is aligned
 * to 64bits, and change the values to big endian.
*/ #define MAKEDUMPFILE_SIGNATURE "makedumpfile" #define NUM_SIG_MDF (sizeof(MAKEDUMPFILE_SIGNATURE) - 1) #define SIZE_SIG_MDF roundup(sizeof(char) * NUM_SIG_MDF, 8) #define SIG_LEN_MDF (SIZE_SIG_MDF / sizeof(char)) #define MAX_SIZE_MDF_HEADER (4096) /* max size of makedumpfile_header */ #define TYPE_FLAT_HEADER (1) /* type of flattened format */ #define VERSION_FLAT_HEADER (1) /* current version of flattened format */ #define END_FLAG_FLAT_HEADER (-1) struct makedumpfile_header { char signature[SIG_LEN_MDF]; /* = "makedumpfile" */ int64_t type; int64_t version; }; struct makedumpfile_data_header { int64_t offset; int64_t buf_size; }; crash-7.1.4/lkcd_x86_trace.h0000664000000000000000000004772612634305150014313 0ustar rootroot/* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. */ /* * lkcd_x86_trace.h * * Copyright (C) 2002, 2003, 2004, 2005, 2010 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2010 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Adapted as noted from the following LKCD files: * * lkcdutils-4.1/libklib/include/asm-i386/kl_types.h * lkcdutils-4.1/lcrash/include/lc_command.h * lkcdutils-4.1/libklib/include/klib.h * lkcdutils-4.1/lcrash/include/asm-i386/lc_dis.h * lkcdutils-4.1/lcrash/include/asm-i386/lc_trace.h * lkcdutils-4.1/libutil/kl_queue.h * lkcdutils-4.1/libklib/include/kl_error.h */ #ifdef REDHAT #include "defs.h" #define TASK_STRUCT_SZ (SIZE(task_struct)) #define KL_PAGE_OFFSET (machdep->kvbase) #define LINUX_2_2_X(KL_LINUX_RELEASE) (VALID_MEMBER(task_struct_tss)) #define KLE_PRINT_TRACE_ERROR KLE_INVALID_KERNELSTACK typedef struct syment syment_t; #define s_addr value #define s_name name typedef uint32_t kaddr_t; extern int INT_EFRAME_SS; extern int INT_EFRAME_ESP; extern int INT_EFRAME_EFLAGS; extern int INT_EFRAME_CS; extern int INT_EFRAME_EIP; extern int INT_EFRAME_ERR; extern int INT_EFRAME_ES; extern int INT_EFRAME_DS; extern int INT_EFRAME_EAX; extern int INT_EFRAME_EBP; extern int INT_EFRAME_EDI; extern int INT_EFRAME_ESI; extern int INT_EFRAME_EDX; extern int INT_EFRAME_ECX; extern int INT_EFRAME_EBX; extern int INT_EFRAME_GS; extern ulong int_eframe[]; #endif /* REDHAT */ /* * lkcdutils-4.1/libklib/include/asm-i386/kl_types.h */ typedef uint32_t uaddr_t; typedef uint64_t k_error_t; /* * lkcdutils-4.1/lcrash/include/lc_command.h */ #define C_ALL 0x00000004 /* All elements */ #define C_PERM 0x00000008 /* Allocate perminant blocks */ #define C_TEMP 0 /* For completeness */ #define C_FULL 0x00000010 /* Full output */ /* * lkcdutils-4.1/libklib/include/klib.h */ #define K_TEMP 1 #define K_PERM 2 /* * lkcdutils-4.1/lcrash/include/asm-i386/lc_dis.h */ /* Buffer to hold a cache of instruction bytes...we have to make sure * that there are AT LEAST 15 unread bytes in the buffer at all times, * as this is the maximum number of bytest that can belong to a single * instruction. * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. 
*/ typedef struct instr_buf_s { kaddr_t addr; int size; unsigned char *ptr; unsigned char buf[256]; } instr_buf_t; typedef struct opcode_rec_s { char *name; int Op1; int opdata1; int Op2; int opdata2; int Op3; int opdata3; } opcode_rec_t; typedef struct op_s { int op_type; int op_seg; int op_reg; int op_disp; int op_base; int op_index; int op_scale; kaddr_t op_addr; } op_t; typedef struct instr_rec_s { struct instr_rec_s *next; struct instr_rec_s *prev; kaddr_t addr; /* start address of instruction */ opcode_rec_t *opcodep; int size; int aflag; int dflag; unsigned int prefixes; unsigned int opcode; unsigned char modrm; unsigned char sib; int have_sib; /* needed because sib can be zero */ op_t operand[3]; } instr_rec_t; /* Addressing methods */ #define M_NONE 0 #define M_A 1 #define M_C 2 #define M_D 3 #define M_E 4 #define M_indirE 5 #define M_F 6 #define M_G 7 #define M_I 8 #define M_sI 9 #define M_J 10 #define M_M 11 #define M_O 12 #define M_P 13 #define M_Q 14 #define M_R 15 #define M_S 16 #define M_T 17 #define M_V 18 #define M_W 19 #define M_X 20 #define M_Y 21 #define M_MMX 22 #define M_EM 23 #define M_MS 24 #define M_GRP 25 #define M_REG 26 #define M_indirREG 27 #define M_FLOAT 28 #define M_FGRP 29 #define M_BAD 30 /* Must be last on list */ /* Operand data types */ #define T_NONE 0 #define T_a 1 #define T_b 2 #define T_c 3 #define T_d 4 #define T_dq 5 #define T_p 6 #define T_pi 7 #define T_ps 8 #define T_q 9 #define T_s 10 #define T_ss 11 #define T_si 12 #define T_v 13 #define T_w 14 #define T_BAD 15 /* Must be last in list */ /* Register operand types */ #define R_eAX 0 #define R_eCX 1 #define R_eDX 2 #define R_eBX 3 #define R_eSP 4 #define R_eBP 5 #define R_eSI 6 #define R_eDI 7 #define R_AX 8 #define R_CX 9 #define R_DX 10 #define R_BX 11 #define R_SP 12 #define R_BP 13 #define R_SI 14 #define R_DI 15 #define R_AL 16 #define R_CL 17 #define R_DL 18 #define R_BL 19 #define R_AH 20 #define R_CH 21 #define R_DH 22 #define R_BH 23 #define R_ES 24 #define 
R_CS 25 #define R_SS 26 #define R_DS 27 #define R_FS 28 #define R_GS 29 #define R_BX_SI 30 #define R_BX_DI 31 #define R_BP_SI 32 #define R_BP_DI 33 #define R_BAD 34 /* Must be last on list */ /* Operand codes */ #define BAD M_BAD, T_BAD #define NONE M_NONE, T_NONE #define Ap M_A, T_p #define Av M_A, T_v #define Cd M_C, T_d #define Dd M_D, T_d #define Dx M_D, T_x #define Td M_T, T_d #define Eb M_E, T_b #define indirEb M_indirE, T_b #define Ev M_E, T_v #define indirEv M_indirE, T_v #define Ew M_E, T_w #define Gb M_G, T_b #define Gv M_G, T_v #define Gw M_G, T_w #define Ib M_I, T_b #define sIb M_sI, T_b #define Iv M_I, T_v #define sIv M_sI, T_v #define Iw M_I, T_w #define sIw M_sI, T_w #define Jb M_J, T_b #define Jp M_J, T_p #define Jv M_J, T_v #define M M_M, T_NONE #define Ma M_M, T_a #define Mp M_M, T_p #define Ob M_O, T_b #define Ov M_O, T_v #define Pq M_P, T_q #define Qq M_Q, T_q #define Qd M_Q, T_d #define Rw M_R, T_w #define Rd M_R, T_d #define Sw M_S, T_w #define Vq M_V, T_q #define Vss M_V, T_ss #define Wq M_W, T_q #define Wss M_W, T_ss #define Xb M_X, T_b #define Xv M_X, T_v #define Yb M_Y, T_b #define Yv M_Y, T_v /* 32-bit */ #define eAX M_REG, R_eAX #define eBX M_REG, R_eBX #define eCX M_REG, R_eCX #define eDX M_REG, R_eDX #define eSP M_REG, R_eSP #define eBP M_REG, R_eBP #define eSI M_REG, R_eSI #define eDI M_REG, R_eDI /* 16-bit */ #define AX M_REG, R_AX #define BX M_REG, R_BX #define CX M_REG, R_CX #define DX M_REG, R_DX #define indirDX M_indirREG, R_DX #define DX M_REG, R_DX #define BP M_REG, R_BP #define SI M_REG, R_SI #define DI M_REG, R_DI #define SP M_REG, R_SP /* 8-bit */ #define AH M_REG, R_AH #define AL M_REG, R_AL #define BH M_REG, R_BH #define BL M_REG, R_BL #define CH M_REG, R_CH #define CL M_REG, R_CL #define DH M_REG, R_DH #define DL M_REG, R_DL /* Segment Registers */ #define cs M_REG, R_CS #define ds M_REG, R_DS #define ss M_REG, R_SS #define es M_REG, R_ES #define fs M_REG, R_FS #define gs M_REG, R_GS #define MX M_MMX, T_NONE #define EM 
M_EM, T_NONE #define MS M_MS, T_NONE #define GRP1b "GRP1b", M_GRP, 0 #define GRP1S "GRP1S", M_GRP, 1 #define GRP1Ss "GRP1Ss", M_GRP, 2 #define GRP2b "GRP2b", M_GRP, 3 #define GRP2S "GRP2S", M_GRP, 4 #define GRP2b_one "GRP2b_one", M_GRP, 5 #define GRP2S_one "GRP2S_one", M_GRP, 6 #define GRP2b_cl "GRP2b_cl", M_GRP, 7 #define GRP2S_cl "GRP2S_cl", M_GRP, 8 #define GRP3b "GRP3b", M_GRP, 9 #define GRP3S "GRP3S", M_GRP, 10 #define GRP4 "GRP4", M_GRP, 11 #define GRP5 "GRP5", M_GRP, 12 #define GRP6 "GRP6", M_GRP, 13 #define GRP7 "GRP7", M_GRP, 14 #define GRP8 "GRP8", M_GRP, 15 #define GRP9 "GRP9", M_GRP, 16 #define GRP10 "GRP10", M_GRP, 17 #define GRP11 "GRP11", M_GRP, 18 #define GRP12 "GRP12", M_GRP, 19 #define FLOAT "FLOAT", M_FLOAT, T_NONE #define ST M_FLOAT, T_NONE #define STi M_FLOAT, T_NONE #define FGRPd9_2 "FGRPd9_2", M_FGRP, 0 #define FGRPd9_4 "FGRPd9_4", M_FGRP, 1 #define FGRPd9_5 "FGRPd9_5", M_FGRP, 2 #define FGRPd9_6 "FGRPd9_6", M_FGRP, 3 #define FGRPd9_7 "FGRPd9_7", M_FGRP, 4 #define FGRPda_5 "FGRPda_5", M_FGRP, 5 #define FGRPdb_4 "FGRPdb_4", M_FGRP, 6 #define FGRPde_3 "FGRPde_3", M_FGRP, 7 #define FGRPdf_4 "FGRPdf_4", M_FGRP, 8 #define PREFIX_REPZ 0x0001 #define PREFIX_REPNZ 0x0002 #define PREFIX_LOCK 0x0004 #define PREFIX_CS 0x0008 #define PREFIX_SS 0x0010 #define PREFIX_DS 0x0020 #define PREFIX_ES 0x0040 #define PREFIX_FS 0x0080 #define PREFIX_GS 0x0100 #define PREFIX_DATA 0x0200 #define PREFIX_ADR 0x0400 #define PREFIX_FWAIT 0x0800 /* Operand types */ #define O_REG 0x0001 #define O_IMMEDIATE 0x0002 #define O_ADDR 0x0004 #define O_OFF 0x0008 #define O_DISP 0x0010 #define O_BASE 0x0020 #define O_INDEX 0x0040 #define O_SCALE 0x0080 #define O_INDIR 0x0100 #define O_SEG 0x0200 #define O_CR 0x0400 #define O_DB 0x0800 #define O_LPTR 0x1000 #ifndef REDHAT /* Function prototypes */ int get_instr_info( kaddr_t /* pc */, instr_rec_t * /* pointer to instr_rec_s struct */); instr_rec_t *get_instr_stream( kaddr_t /* program counter */, int /* before count */, int /* after 
count */); void free_instr_stream( instr_rec_t *); #endif /* !REDHAT */ /* * lkcdutils-4.1/lcrash/include/asm-i386/lc_trace.h */ /* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. */ #define STACK_SEGMENTS 1 #ifdef REDHAT #define STACK_SIZE (STACKSIZE()) #define KSTACK_SIZE (STACKSIZE()) #else /* REDHAT */ #define STACK_SIZE 0x2000 #endif /* !REDHAT */ #ifdef NOT #define INCLUDE_REGINFO 1 #endif #ifdef INCLUDE_REGINFO #define NUM_REGS 8 #define REGVAL_UNKNOWN 0 #define REGVAL_VALID 1 #define REGVAL_BAD 2 /* Value loaded into register before it was saved */ /* Register record */ typedef struct reg_rec { uint32_t state; uint32_t value; } reg_rec_t; #endif /* Stack frame */ typedef struct sframe_rec { struct sframe_rec *next; struct sframe_rec *prev; int flag; int level; char *funcname; char *srcfile; int line_no; kaddr_t pc; kaddr_t ra; kaddr_t sp; kaddr_t fp; uint32_t *asp; int frame_size; int ptr; uint64_t error; #ifdef INCLUDE_REGINFO reg_rec_t regs[NUM_REGS]; #endif } sframe_t; /* flag field of sframe_t */ #define EX_FRAME 0x1 /* this frame is an interrupt or exception frame, pt_regs field of sframe_t is valid in this case */ #define INCOMPLETE_EX_FRAME 0x2 #define SET_EX_FRAME_ADDR 0x4 /* Stack segment structure */ struct stack_s { int type; uint32_t size; kaddr_t addr; uint32_t *ptr; }; /* Stack trace header */ typedef struct trace_rec { int flags; kaddr_t task; struct task_struct *tsp; struct stack_s stack[STACK_SEGMENTS]; int stackcnt; sframe_t *frame; int nframes; #ifdef REDHAT struct bt_info *bt; #endif } trace_t; #define TF_TRACEREC_VALID 0x01 /* The trace_rec_s has been setup already! 
*/ #define TF_SUPPRESS_HEADER 0x02 /* Suppress header output from trace cmds */ /* Stack types */ #define S_USERSTACK 0 #define S_KERNELSTACK 1 /* Stack frame updating macro */ #define UPDATE_FRAME(FUNCNAME, PC, RA, SP, FP, ASP, SRCNAME, LINE_NO, SIZE, FLAG) \ curframe->funcname = FUNCNAME; \ curframe->pc = PC; \ curframe->sp = SP; \ curframe->ra = RA; \ curframe->fp = FP; \ curframe->asp = ASP; \ curframe->srcfile = SRCNAME; \ curframe->line_no = LINE_NO; \ curframe->frame_size = SIZE; \ curframe->ptr = curstkidx; \ kl_enqueue((element_t **)&trace->frame, (element_t *)curframe); \ trace->nframes++; \ curframe->flag |= FLAG; \ #ifndef REDHAT /* Function prototypes */ void print_pc( kaddr_t /* PC */, FILE * /* output file pointer */); trace_t *alloc_trace_rec( int /* flag */); int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *); int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int); void trace_banner(FILE *); int print_traces(kaddr_t, int, int, FILE *); void print_trace(trace_t *, int, FILE *); void free_trace_rec(trace_t *t); int task_trace(kaddr_t, int, FILE *); int do_list(kaddr_t, int, FILE *); void live_vmdump(int, int); int do_report(int, FILE *); void stab_type_banner(FILE *, int); void ktype_banner(FILE *, int); void print_stab_type(stab_type_t *, int, FILE *); void print_ktype(kltype_t *, int, FILE *); void walk_ktype(kltype_t *, int, FILE *); int list_stab_types(int, FILE *); int list_ktypes(int, FILE *); void structlist(FILE *); int walk_structs(char *, char *, int, kaddr_t, int, FILE *); sframe_t *alloc_sframe(trace_t *, int); int add_frame(trace_t *, kaddr_t, kaddr_t); void finish_trace(trace_t *); int dumptask_trace(kaddr_t, dump_header_asm_t *, int, FILE *); #endif /* !REDHAT */ /* * lkcdutils-4.1/libutil/kl_queue.h */ /* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef _KL_QUEUE_H #define _KL_QUEUE_H /* List element header */ typedef struct element_s { struct element_s *next; struct element_s *prev; } element_t; /* Some useful macros */ #define ENQUEUE(list, elem) \ kl_enqueue((element_t **)list, (element_t *)elem) #define DEQUEUE(list) kl_dequeue((element_t **)list) #define FINDQUEUE(list, elem) \ kl_findqueue((element_t **)list, (element_t *)elem) #define REMQUEUE(list, elem) kl_remqueue((element_t **)list, (element_t *)elem) typedef struct list_of_ptrs { element_t elem; unsigned long long val64; } list_of_ptrs_t; #define FINDLIST_QUEUE(list, elem, compare) \ kl_findlist_queue((list_of_ptrs_t **)list, \ (list_of_ptrs_t *)elem, compare) #ifndef REDHAT /** ** Function prototypes **/ /* Add a new element to the tail of a doubly linked list. */ void kl_enqueue( element_t** /* ptr to head of list */, element_t* /* ptr to element to add to the list */); /* Remove an element from the head of a doubly linked list. A pointer * to the element will be returned. In the event that the list is * empty, a NULL pointer will be returned. */ element_t *kl_dequeue( element_t** /* ptr to list head (first item removed) */); /* Checks to see if a particular element is in a list. If it is, a * value of one (1) will be returned. Otherwise, a value of zero (0) * will be returned. */ int kl_findqueue( element_t** /* ptr to head of list */, element_t* /* ptr to element to find on list */); /* Walks through a list of pointers to queues and looks for a * particular list. */ int kl_findlist_queue( list_of_ptrs_t** /* ptr to list of lists */, list_of_ptrs_t* /* ptr to list to look for */, int(*)(void *, void *) /* ptr to compare function */); /* Remove specified element from doubly linked list. 
*/ void kl_remqueue( element_t** /* ptr to head of list */, element_t* /* ptr to element to remove from list */); #endif /* !REDHAT */ #endif /* _KL_QUEUE_H */ /* * lkcdutils-4.1/libklib/include/kl_error.h */ /* * kl_error.h * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. */ /** ** This header file contains basic definitions and declarations ** for the KLIB error handling facility. ** **/ #ifndef __KL_ERROR_H #define __KL_ERROR_H /* Error Classes */ #define KLEC_APP 0 #define KLEC_KLIB 1 #define KLEC_MEM 2 #define KLEC_SYM 3 #define KLEC_KERN 4 #define KLEC_CLASS_MASK 0x00000000ff000000 #define KLEC_CLASS_SHIFT 24 #define KLEC_ECODE_MASK 0x0000000000ffffff #define KLEC_TYPE_MASK 0xffffffff00000000 #define KLEC_TYPE_SHIFT 32 #define KLEC_CLASS(e) ((e & KLEC_CLASS_MASK) >> KLEC_CLASS_SHIFT) #define KLEC_ECODE(e) (e & KLEC_ECODE_MASK) #define KLEC_TYPE(e) ((e & KLEC_TYPE_MASK) >> KLEC_TYPE_SHIFT) extern uint64_t klib_error; void kl_reset_error(void); void kl_print_error(void); /** ** Some macros for accessing data in klib_error **/ #define KLIB_ERROR klib_error #define KL_ERROR klib_error #define KL_ERRORVAL klib_errorval #define KL_ERRORFP stderr /* Error codes * * There are basically two types of error codes -- with each type * residing in a single word in a two word error code value. The lower * 32-bits contains an error class and code that represents exactly * WHAT error occurred (e.g., non-numeric text in a numeric value * entered by a user, bad virtual address, etc.). * * The upper 32-bits represents what type of data was being referenced * when the error occurred (e.g., bad proc struct). Having two tiers of * error codes makes it easier to generate useful and specific error * messages. Note that is possible to have situations where one or the * other type of error codes is not set. This is OK as long as at least * one type s set. 
*/ /** General klib error codes **/ #define KLE_KLIB (KLEC_KLIB << KLEC_CLASS_SHIFT) #define KLE_NO_MEMORY (KLE_KLIB|1) #define KLE_OPEN_ERROR (KLE_KLIB|2) #define KLE_ZERO_BLOCK (KLE_KLIB|3) #define KLE_INVALID_VALUE (KLE_KLIB|4) #define KLE_NULL_BUFF (KLE_KLIB|5) #define KLE_ZERO_SIZE (KLE_KLIB|6) #define KLE_ACTIVE (KLE_KLIB|7) #define KLE_MISC_ERROR (KLE_KLIB|97) #define KLE_NOT_SUPPORTED (KLE_KLIB|98) #define KLE_UNKNOWN_ERROR (KLE_KLIB|99) /** memory error codes **/ #define KLE_MEM (KLEC_MEM << KLEC_CLASS_SHIFT) #define KLE_BAD_MAP_FILE (KLE_MEM|1) #define KLE_BAD_DUMP (KLE_MEM|2) #define KLE_BAD_DUMPTYPE (KLE_MEM|3) #define KLE_INVALID_LSEEK (KLE_MEM|4) #define KLE_INVALID_READ (KLE_MEM|5) #define KLE_BAD_MEMINFO (KLE_MEM|6) #define KLE_INVALID_PADDR (KLE_MEM|7) #define KLE_INVALID_VADDR (KLE_MEM|8) #define KLE_INVALID_VADDR_ALIGN (KLE_MEM|9) #define KLE_INVALID_MAPPING (KLE_MEM|10) #define KLE_CMP_ERROR (KLE_MEM|11) #define KLE_INVALID_DUMP_MAGIC (KLE_MEM|12) #define KLE_KERNEL_MAGIC_MISMATCH (KLE_MEM|13) #define KLE_NO_END_SYMBOL (KLE_MEM|14) #define KLE_INVALID_DUMP_HEADER (KLE_MEM|15) #define KLE_DUMP_INDEX_CREATION (KLE_MEM|16) #define KLE_DUMP_HEADER_ONLY (KLE_MEM|17) /** symbol error codes **/ #define KLE_SYM (KLEC_SYM << KLEC_CLASS_SHIFT) #define KLE_NO_SYMTAB (KLE_SYM|1) #define KLE_NO_SYMBOLS (KLE_SYM|2) #define KLE_INVALID_TYPE (KLE_SYM|3) #define KLE_NO_MODULE_LIST (KLE_SYM|4) /** kernel data error codes **/ #define KLE_KERN (KLEC_KERN << KLEC_CLASS_SHIFT) #define KLE_INVALID_KERNELSTACK (KLE_KERN|1) #define KLE_INVALID_STRUCT_SIZE (KLE_KERN|2) #define KLE_BEFORE_RAM_OFFSET (KLE_KERN|3) #define KLE_AFTER_MAXPFN (KLE_KERN|4) #define KLE_AFTER_PHYSMEM (KLE_KERN|5) #define KLE_AFTER_MAXMEM (KLE_KERN|6) #define KLE_PHYSMEM_NOT_INSTALLED (KLE_KERN|7) #define KLE_NO_DEFTASK (KLE_KERN|8) #define KLE_PID_NOT_FOUND (KLE_KERN|9) #define KLE_DEFTASK_NOT_ON_CPU (KLE_KERN|10) #define KLE_NO_CURCPU (KLE_KERN|11) #define KLE_NO_CPU (KLE_KERN|12) #define 
KLE_SIG_ERROR (KLE_KERN|13) /** Error codes that indicate what type of data was bad. These are ** placed in the upper 32-bits of klib_error. **/ #define KLE_BAD_TASK_STRUCT (((uint64_t)1)<<32) #define KLE_BAD_SYMNAME (((uint64_t)2)<<32) #define KLE_BAD_SYMADDR (((uint64_t)3)<<32) #define KLE_BAD_FUNCADDR (((uint64_t)4)<<32) #define KLE_BAD_STRUCT (((uint64_t)5)<<32) #define KLE_BAD_FIELD (((uint64_t)6)<<32) #define KLE_BAD_PC (((uint64_t)7)<<32) #define KLE_BAD_RA (((uint64_t)8)<<32) #define KLE_BAD_SP (((uint64_t)9)<<32) #define KLE_BAD_EP (((uint64_t)10)<<32) #define KLE_BAD_SADDR (((uint64_t)11)<<32) #define KLE_BAD_KERNELSTACK (((uint64_t)12)<<32) #define KLE_BAD_LINENO (((uint64_t)13)<<32) #define KLE_MAP_FILE (((uint64_t)14)<<32) #define KLE_DUMP (((uint64_t)15)<<32) #define KLE_BAD_STRING (((uint64_t)16)<<32) #endif /* __KL_ERROR_H */ crash-7.1.4/xen_hyper_command.c0000664000000000000000000013325712634305150015200 0ustar rootroot/* * xen_hyper_command.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
*/

#include "defs.h"

#ifdef XEN_HYPERVISOR_ARCH
#include "xen_hyper_defs.h"

/*
 * xhregt[] maps ELF register-note slots to printable register names for
 * the "dumpinfo -r" output; the array is NULL-terminated so the display
 * loop can stop before running past the register set.
 */
#ifdef X86
char *xhregt[] = {
	"ebx", "ecx", "edx", "esi", "edi", "ebp", "eax",
	"ds", "es", "fs", "gs", "orig_eax", "eip", "cs", "eflags",
	"esp", "ss",
	NULL
};
#endif

#ifdef X86_64
/*
 * NOTE(review): "fs"/"gs" appear twice near the end -- presumably
 * mirroring the dump's register-note layout; confirm against the
 * x86_64 ELF_Gregset ordering before changing.
 */
char *xhregt[] = {
	"r15", "r14", "r13", "r12", "rbp", "rbx", "r11", "r10",
	"r9", "r8", "rax", "rcx", "rdx", "rsi", "rdi", "orig_rax",
	"rip", "cs", "eflags", "rsp", "ss", "fs", "gs", "ds", "es",
	"fs", "gs",
	NULL
};
#endif

#ifdef IA64
/* placeholder names only */
char *xhregt[] = { "aaa", "bbb", NULL };
#endif

/* command workers ("do") and per-entry display helpers ("show") */
static void xen_hyper_do_domain(struct xen_hyper_cmd_args *da);
static void xen_hyper_do_doms(struct xen_hyper_cmd_args *da);
static void xen_hyper_show_doms(struct xen_hyper_domain_context *dc);
static void xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia);
static void xen_hyper_show_dumpinfo(ulong flag,
	struct xen_hyper_dumpinfo_context *dic);
static void xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca);
static void xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc);
static void xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha);
static void xen_hyper_show_sched(ulong flag,
	struct xen_hyper_sched_context *schc);
static void xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca);
static void xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca);
static void xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc);
/* string-argument to context-structure conversion helpers */
static char *xen_hyper_domain_to_type(ulong domain, int *type, char *buf,
	int verbose);
static char *xen_hyper_domain_context_to_type(
	struct xen_hyper_domain_context *dc, int *type, char *buf, int verbose);
static int xen_hyper_str_to_domain_context(char *string, ulong *value,
	struct xen_hyper_domain_context **dcp);
static int xen_hyper_str_to_dumpinfo_context(char *string, ulong *value,
	struct xen_hyper_dumpinfo_context **dicp);
static int xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value,
	struct xen_hyper_vcpu_context **vccp);
static int
xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc,
	ulong *valdom, ulong *valvc, struct xen_hyper_vcpu_context **vccp);
static int xen_hyper_str_to_pcpu_context(char *string, ulong *value,
	struct xen_hyper_pcpu_context **pccp);

/*
 * Display domain struct.
 *
 * Each argument is resolved to a domain context (by domain id or by
 * domain struct address); with no arguments the last-referenced domain
 * (xhdt->last) is dumped.
 */
void
xen_hyper_cmd_domain(void)
{
	struct xen_hyper_cmd_args da;
	struct xen_hyper_domain_context *dc;
	ulong val;
	int c, cnt, type, bogus;

	BZERO(&da, sizeof(struct xen_hyper_cmd_args));
	/* no options are accepted; anything found is a usage error */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}
	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	cnt = bogus = 0;
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			type = xen_hyper_str_to_domain_context(args[optind],
				&val, &dc);
			switch (type) {
			case XEN_HYPER_STR_DID:
			case XEN_HYPER_STR_DOMAIN:
				/* valid id or address: record its context */
				da.value[cnt] = val;
				da.type[cnt] = type;
				da.addr[cnt] = dc->domain;
				da.context[cnt] = dc;
				cnt++;
				break;
			case XEN_HYPER_STR_INVALID:
				error(INFO,
				    "invalid domain or id value: %s\n\n",
					args[optind]);
				bogus++;
			}
		} else {
			/* non-numeric argument aborts the command */
			error(FATAL, "invalid address: %s\n", args[optind]);
		}
		optind++;
	}
	da.cnt = cnt;
	/* return only if every argument was rejected */
	if (bogus && !cnt) {
		return;
	}

	xen_hyper_do_domain(&da);
}

/*
 * Do the work requested by xen_hyper_cmd_domain(): dump the domain
 * struct for each collected context, or for xhdt->last when no
 * arguments were given.  A single-argument invocation also becomes
 * the new xhdt->last.
 */
static void
xen_hyper_do_domain(struct xen_hyper_cmd_args *da)
{
	int i;

	if (da->cnt) {
		if (da->cnt == 1) {
			xhdt->last = da->context[0];
		}
		for (i = 0; i < da->cnt; i++) {
			dump_struct("domain", da->addr[i], 0);
		}
	} else {
		/*
		 * NOTE(review): xhdt->last is dereferenced without a NULL
		 * check -- presumably initialized at session startup; verify.
		 */
		dump_struct("domain", xhdt->last->domain, 0);
	}
}

/*
 * Display domain status.
 */
/*
 * Command handler for "doms": same argument parsing as "domain"
 * (domain ids or domain struct addresses), but shows one status line
 * per domain instead of dumping the whole struct.
 */
void
xen_hyper_cmd_doms(void)
{
	struct xen_hyper_cmd_args da;
	struct xen_hyper_domain_context *dc;
	ulong val;
	int c, cnt, type, bogus;

	BZERO(&da, sizeof(struct xen_hyper_cmd_args));
	/* no options are accepted */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}
	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	cnt = bogus = 0;
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			type = xen_hyper_str_to_domain_context(args[optind],
				&val, &dc);
			switch (type) {
			case XEN_HYPER_STR_DID:
			case XEN_HYPER_STR_DOMAIN:
				da.value[cnt] = val;
				da.type[cnt] = type;
				da.addr[cnt] = dc->domain;
				da.context[cnt] = dc;
				cnt++;
				break;
			case XEN_HYPER_STR_INVALID:
				error(INFO,
				    "invalid domain or id value: %s\n\n",
					args[optind]);
				bogus++;
			}
		} else {
			error(FATAL, "invalid address: %s\n", args[optind]);
		}
		optind++;
	}
	da.cnt = cnt;
	if (bogus && !cnt) {
		return;
	}

	xen_hyper_do_doms(&da);
}

/*
 * Do the work requested by xen_hyper_cmd_doms(): print the column
 * header, then one line per requested domain -- or per every domain
 * in xhdt->context_array when no arguments were given.
 */
static void
xen_hyper_do_doms(struct xen_hyper_cmd_args *da)
{
	struct xen_hyper_domain_context *dca;
	char buf1[XEN_HYPER_CMD_BUFSIZE];
	char buf2[XEN_HYPER_CMD_BUFSIZE];
	int i;

	/* build the header line with width-aware mkstring() fields */
	sprintf(buf1, " DID %s ST T ",
		mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN"));
	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "MAXPAGE");
	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
	mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "TOTPAGE");
	strncat(buf1, " VCPU ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
	mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|RJUST, "SHARED_I");
	strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1);
	mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "P2M_MFN");
	fprintf(fp, "%s\n", buf1);

	if (da->cnt) {
		for (i = 0; i < da->cnt; i++) {
			xen_hyper_show_doms(da->context[i]);
		}
	} else {
		for (i = 0, dca=xhdt->context_array;
		     i < XEN_HYPER_NR_DOMAINS(); i++, dca++) {
			xen_hyper_show_doms(dca);
		}
	}
}

/*
 * Emit one "doms" status line for a single domain context; skips
 * empty (unused) context slots.
 */
static void
xen_hyper_show_doms(struct xen_hyper_domain_context *dc)
{
	char *act, *crash;
	uint cpuid;
	int type, i,
j; struct xen_hyper_pcpu_context *pcc; #if defined(X86) || defined(X86_64) char *shared_info; #elif defined(IA64) char *domain_struct; ulong pgd; #endif char buf1[XEN_HYPER_CMD_BUFSIZE]; char buf2[XEN_HYPER_CMD_BUFSIZE]; if (!(dc->domain)) { return; } #if defined(X86) || defined(X86_64) shared_info = GETBUF(XEN_HYPER_SIZE(shared_info)); if (dc->shared_info) { if (!readmem(dc->shared_info, KVADDR, shared_info, XEN_HYPER_SIZE(shared_info), "fill_shared_info_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { error(WARNING, "cannot fill shared_info struct.\n"); BZERO(shared_info, XEN_HYPER_SIZE(shared_info)); } } #elif defined(IA64) if ((domain_struct = xen_hyper_read_domain(dc->domain)) == NULL) { error(FATAL, "cannot read domain.\n"); } #endif act = NULL; for_cpu_indexes(i, cpuid) { pcc = xen_hyper_id_to_pcpu_context(cpuid); for (j = 0; j < dc->vcpu_cnt; j++) { if (pcc->current_vcpu == dc->vcpu[j]) { act = ">"; break; } } if (act) break; } if (act == NULL) act = " "; if (xht->crashing_vcc && dc->domain == xht->crashing_vcc->domain) { crash = "*"; } else { crash = " "; } sprintf(buf1, "%s%s%5d ", act, crash, dc->domain_id); mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, (char *)(dc->domain)); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); sprintf(&buf1[strlen(buf1)], "%s ", xen_hyper_domain_state_string(dc, buf2, !VERBOSE)); sprintf(&buf1[strlen(buf1)], "%s ", xen_hyper_domain_context_to_type(dc, &type, buf2, !VERBOSE)); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST, MKSTR((long)(dc->max_pages))); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST, MKSTR((long)(dc->tot_pages))); sprintf(&buf1[strlen(buf1)], " %3d ", dc->vcpu_cnt); mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dc->shared_info)); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); #if defined(X86) || defined(X86_64) if (dc->shared_info) { 
mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(ULONG(shared_info + XEN_HYPER_OFFSET(shared_info_arch) + XEN_HYPER_OFFSET(arch_shared_info_pfn_to_mfn_frame_list_list))) ); } else { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----"); } FREEBUF(shared_info); #elif defined(IA64) pgd = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_arch) + XEN_HYPER_OFFSET(arch_domain_mm) + XEN_HYPER_OFFSET(mm_struct_pgd)); if (pgd) { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR((pgd - DIRECTMAP_VIRT_START) >> machdep->pageshift)); } else { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----"); } #endif fprintf(fp, "%s\n", buf1); } /* * Display ELF Notes information. */ void xen_hyper_cmd_dumpinfo(void) { struct xen_hyper_cmd_args dia; ulong flag; ulong val; struct xen_hyper_dumpinfo_context *dic; int c, cnt, type, bogus; BZERO(&dia, sizeof(struct xen_hyper_cmd_args)); flag = val =0; dic = NULL; while ((c = getopt(argcnt, args, "rt")) != EOF) { switch(c) { case 't': flag |= XEN_HYPER_DUMPINFO_TIME; break; case 'r': flag |= XEN_HYPER_DUMPINFO_REGS; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_dumpinfo_context(args[optind], &val, &dic); switch (type) { case XEN_HYPER_STR_PCID: case XEN_HYPER_STR_ADDR: dia.value[cnt] = val; dia.type[cnt] = type; dia.context[cnt] = dic; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid note address or id " "value: %s\n\n", args[optind]); bogus++; break; } } else { error(INFO, "invalid note address or id " "value: %s\n\n", args[optind]); } optind++; } dia.cnt = cnt; if (!cnt && bogus) { return; } xen_hyper_do_dumpinfo(flag, &dia); } /* * Do the work requested by xen_hyper_cmd_dumpinfo(). 
 */
/*
 * Worker for "dumpinfo": print one entry per requested pcpu (or every
 * pcpu when no arguments were given).  The column header is reprinted
 * before each entry when -r/-t output follows it.
 */
static void
xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia)
{
	struct xen_hyper_dumpinfo_context *dic;
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int i, cnt;

	if (dia->cnt) {
		cnt = dia->cnt;
	} else {
		cnt = XEN_HYPER_NR_PCPUS();
	}
	for (i = 0; i < cnt; i++) {
		/* header: first entry always, every entry with -r/-t */
		if (i == 0 || flag & XEN_HYPER_DUMPINFO_REGS ||
		    flag & XEN_HYPER_DUMPINFO_TIME) {
			if (i) {
				fprintf(fp, "\n");
			}
			sprintf(buf, " PCID ");
			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST,
				"ENOTE");
//			sprintf(&buf[strlen(buf)], " PID PPID PGRP SID");
			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
			mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST,
				"CORE");
			/* newer note formats carry extra per-cpu sections */
			if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
				strncat(buf, " ",
					XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
				mkstring(&buf[strlen(buf)], VADDR_PRLEN,
					CENTER|RJUST, "XEN_CORE");
			}
			if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
				strncat(buf, " ",
					XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
				mkstring(&buf[strlen(buf)], VADDR_PRLEN,
					CENTER|RJUST, "XEN_INFO");
			}
			fprintf(fp, "%s\n", buf);
		}
		if (dia->cnt) {
			dic = dia->context[i];
		} else {
			dic = xen_hyper_id_to_dumpinfo_context(xht->cpu_idxs[i]);
		}
		xen_hyper_show_dumpinfo(flag, dic);
	}
}

/*
 * Emit the "dumpinfo" line(s) for one pcpu's ELF note: the note
 * addresses, optionally (-t) the four ELF_Prstatus timevals and
 * (-r) the saved general registers named by xhregt[].
 */
static void
xen_hyper_show_dumpinfo(ulong flag, struct xen_hyper_dumpinfo_context *dic)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	char *note_buf;
	ulong addr;
	ulong *regs;
	long tv_sec, tv_usec;
	int i, regcnt;

	if (!dic || !dic->note) {
		return;
	}
	note_buf = dic->ELF_Prstatus_ptr;

	sprintf(buf, "%5d ", dic->pcpu_id);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
		MKSTR(dic->note));
#if 0
	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pid));
	sprintf(&buf[strlen(buf)], " %5d ", pid);
	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_ppid));
	sprintf(&buf[strlen(buf)], "%5d ", pid);
	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pgrp));
	sprintf(&buf[strlen(buf)], "%5d ", pid);
	pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_sid));
	sprintf(&buf[strlen(buf)], "%5d", pid);
#endif
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
		MKSTR(dic->note));
	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) {
		/* XEN_CORE section follows the core section */
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST,
			MKSTR(dic->note + xhdit->core_size));
	}
	if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) {
		/* XEN_INFO exists only on the designated xen_info cpu */
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		if (xhdit->xen_info_cpu == dic->pcpu_id)
			mkstring(&buf[strlen(buf)], VADDR_PRLEN,
				CENTER|LONG_HEX|RJUST,
				MKSTR(dic->note + xhdit->core_size +
				xhdit->xen_core_size));
		else
			mkstring(&buf[strlen(buf)], VADDR_PRLEN,
				CENTER|RJUST, "--");
	}
	fprintf(fp, "%s\n", buf);

	if (flag & XEN_HYPER_DUMPINFO_TIME) {
		sprintf(buf, " ");
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST,
			"tv_sec");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST,
			"tv_usec");
		fprintf(fp, "%s\n", buf);
		addr = (ulong)note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_utime);
		/* pr_utime/pr_stime/pr_cutime/pr_cstime are consecutive */
		for (i = 0; i < 4; i++, addr += XEN_HYPER_SIZE(ELF_Timeval)) {
			switch (i) {
			case 0:
				sprintf(buf, " pr_utime ");
				break;
			case 1:
				sprintf(buf, " pr_stime ");
				break;
			case 2:
				sprintf(buf, " pr_cutime ");
				break;
			case 3:
				sprintf(buf, " pr_cstime ");
				break;
			}
			tv_sec = LONG(addr +
				XEN_HYPER_OFFSET(ELF_Timeval_tv_sec));
			/*
			 * NOTE(review): tv_usec is read at the sum of the
			 * tv_sec and tv_usec offsets -- presumably the
			 * offsets are cumulative here; verify against the
			 * ELF_Timeval layout.
			 */
			tv_usec = LONG(addr +
				XEN_HYPER_OFFSET(ELF_Timeval_tv_sec) +
				XEN_HYPER_OFFSET(ELF_Timeval_tv_usec));
			mkstring(&buf[strlen(buf)], LONG_PRLEN,
				CENTER|LONG_HEX|RJUST, MKSTR(tv_sec));
			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
			mkstring(&buf[strlen(buf)], LONG_PRLEN,
				CENTER|LONG_HEX|RJUST, MKSTR(tv_usec));
			fprintf(fp, "%s\n", buf);
		}
	}

	if (flag & XEN_HYPER_DUMPINFO_REGS) {
		regcnt = XEN_HYPER_SIZE(ELF_Gregset) / sizeof(long);
		addr = (ulong)note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg);
		regs = (ulong *)addr;
		fprintf(fp, "Register information(%lx):\n",
			dic->note + xhdit->core_offset +
			XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg));
		/* xhregt[] is NULL-terminated; stop at whichever ends first */
		for (i = 0; i < regcnt; i++, regs++) {
			if (xhregt[i] == NULL) {
				break;
			}
			fprintf(fp, " %s = ", xhregt[i]);
			fprintf(fp, "0x%s\n", mkstring(buf, LONG_PRLEN,
				LONG_HEX|LJUST, MKSTR(*regs)));
		}
	}
}

/*
 * Dump the Xen conring in chronological order.
 */
void
xen_hyper_cmd_log(void)
{
	int c;

	/* no options are accepted */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}
	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	xen_hyper_dump_log();
}

/*
 * Read the hypervisor console ring ("conring") and print it oldest
 * character first.  When the producer index has wrapped, the tail of
 * the buffer (idx..end) is printed before the head (0..idx).
 */
void
xen_hyper_dump_log(void)
{
	uint conringp, warp, len, idx, i;
	ulong conring;
	char *buf;
	char last = 0;
	uint32_t conring_size;

	/* conring is either the array itself or a pointer to it */
	if (get_symbol_type("conring", NULL, NULL) == TYPE_CODE_ARRAY)
		conring = symbol_value("conring");
	else
		get_symbol_data("conring", sizeof(ulong), &conring);
	get_symbol_data("conringp", sizeof(uint), &conringp);
	/* older hypervisors have a fixed-size ring with no size symbol */
	if (symbol_exists("conring_size"))
		get_symbol_data("conring_size", sizeof(uint32_t),
			&conring_size);
	else
		conring_size = XEN_HYPER_CONRING_SIZE;
	if (conringp >= conring_size) {
		/*
		 * NOTE(review): the mask assumes conring_size is a power
		 * of two -- confirm no hypervisor ships another size.
		 */
		idx = conringp & (conring_size - 1);
		len = conring_size;
		warp = TRUE;
	} else {
		idx = 0;
		len = conringp;
		warp = FALSE;
	}
	buf = GETBUF(conring_size);
	readmem(conring, KVADDR, buf, conring_size, "conring contents",
		FAULT_ON_ERROR);
wrap_around:
	for (i = idx; i < len; i++) {
		if (buf[i]) {
			/* non-printable bytes are shown as '.' */
			fputc(ascii(buf[i]) ? buf[i] : '.', fp);
			last = buf[i];
		}
	}
	if (warp) {
		/* second pass: the head of the ring up to the producer */
		len = idx;
		idx = 0;
		warp = FALSE;
		goto wrap_around;
	}
	if (last != '\n') {
		fprintf(fp, "\n");
	}
	FREEBUF(buf);
}

/*
 * Display physical cpu information.
*/ void xen_hyper_cmd_pcpus(void) { struct xen_hyper_cmd_args pca; struct xen_hyper_pcpu_context *pcc; ulong flag; ulong val; int c, cnt, type, bogus; BZERO(&pca, sizeof(struct xen_hyper_cmd_args)); flag= 0; while ((c = getopt(argcnt, args, "rt")) != EOF) { switch(c) { case 'r': flag |= XEN_HYPER_PCPUS_REGS; break; case 't': flag |= XEN_HYPER_PCPUS_TSS; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_pcpu_context(args[optind], &val, &pcc); switch (type) { case XEN_HYPER_STR_PCID: case XEN_HYPER_STR_PCPU: pca.value[cnt] = val; pca.type[cnt] = type; pca.addr[cnt] = pcc->pcpu; pca.context[cnt] = pcc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid pcpu or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } pca.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_pcpus(flag, &pca); } /* * Do the work requested by xen_hyper_cmd_pcpu(). 
*/
static void
xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca)
{
	struct xen_hyper_pcpu_context *pcc;
	uint cpuid;
	int i;

	if (pca->cnt) {
		/* explicit pcpu ids/addresses were supplied */
		for (i = 0; i < pca->cnt; i++) {
			xen_hyper_show_pcpus(flag, pca->context[i]);
			flag |= XEN_HYPER_PCPUS_1STCALL;
		}
	} else {
		/* no arguments: show every existing pcpu */
		for_cpu_indexes(i, cpuid) {
			pcc = xen_hyper_id_to_pcpu_context(cpuid);
			xen_hyper_show_pcpus(flag, pcc);
			flag |= XEN_HYPER_PCPUS_1STCALL;
		}
	}
}

/*
 * Print one pcpu line (and optionally its registers/TSS).  A header
 * is printed before the first entry, or before every entry when the
 * verbose -r/-t output separates the entries.
 */
static void
xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc)
{
	char *act = " ";	/* " *" marks the crashing pcpu */
	char buf[XEN_HYPER_CMD_BUFSIZE];

	if (!(pcc->pcpu)) {
		return;
	}
	if (XEN_HYPER_CRASHING_CPU() == pcc->processor_id) {
		act = " *";
	}
	if ((flag & XEN_HYPER_PCPUS_REGS) || (flag & XEN_HYPER_PCPUS_TSS) ||
	    !(flag & XEN_HYPER_PCPUS_1STCALL)) {
		/* blank line between verbose entries after the first */
		if (((flag & XEN_HYPER_PCPUS_REGS) ||
		     (flag & XEN_HYPER_PCPUS_TSS)) &&
		    (flag & XEN_HYPER_PCPUS_1STCALL)) {
			fprintf(fp, "\n");
		}
		sprintf(buf, " PCID ");
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "PCPU");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "TSS");
		fprintf(fp, "%s\n", buf);
	}
	sprintf(buf, "%s%5d ", act, pcc->processor_id);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(pcc->pcpu));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(pcc->current_vcpu));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(pcc->init_tss));
	fprintf(fp, "%s\n", buf);
	if (flag & XEN_HYPER_PCPUS_REGS) {
		fprintf(fp, "Register information:\n");
		dump_struct("cpu_user_regs", pcc->guest_cpu_user_regs, 0);
	}
	if (flag & XEN_HYPER_PCPUS_TSS) {
		fprintf(fp, "init_tss information:\n");
		dump_struct("tss_struct", pcc->init_tss, 0);
	}
}

/*
 * Display schedule info.
*/
void
xen_hyper_cmd_sched(void)
{
	struct xen_hyper_cmd_args scha;
	struct xen_hyper_pcpu_context *pcc;
	ulong flag;
	ulong val;
	int c, cnt, type, bogus;

	BZERO(&scha, sizeof(struct xen_hyper_cmd_args));
	flag = 0;
	while ((c = getopt(argcnt, args, "v")) != EOF) {
		switch(c)
		{
		case 'v':	/* verbose per-cpu scheduler output */
			flag |= XEN_HYPER_SCHED_VERBOSE;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/*
	 * Only pcpu *ids* select a scheduler context here; a raw pcpu
	 * address (XEN_HYPER_STR_PCPU) is rejected along with invalid
	 * strings.
	 */
	cnt = bogus = 0;
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			type = xen_hyper_str_to_pcpu_context(args[optind],
				&val, &pcc);
			switch (type)
			{
			case XEN_HYPER_STR_PCID:
				scha.value[cnt] = val;
				scha.type[cnt] = type;
				scha.context[cnt] =
					&xhscht->sched_context_array[val];
				cnt++;
				break;
			case XEN_HYPER_STR_PCPU:
			case XEN_HYPER_STR_INVALID:
				error(INFO, "invalid pcpu id value: %s\n\n",
					args[optind]);
				bogus++;
			}
		} else {
			error(FATAL, "invalid address: %s\n", args[optind]);
		}
		optind++;
	}
	scha.cnt = cnt;

	if (bogus && !cnt) {
		return;
	}

	xen_hyper_do_sched(flag, &scha);
}

/*
 * Do the work requested by xen_hyper_cmd_sched().
*/
static void
xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha)
{
	struct xen_hyper_sched_context *schc;
	uint cpuid;
	int i;

	fprintf(fp, "Scheduler name : %s\n\n", xhscht->name);

	if (scha->cnt) {
		/* explicit pcpu ids were supplied */
		for (i = 0; i < scha->cnt; i++) {
			xen_hyper_show_sched(flag, scha->context[i]);
			flag |= XEN_HYPER_SCHED_1STCALL;
		}
	} else {
		/* no arguments: show the scheduler context of every pcpu */
		for_cpu_indexes(i, cpuid) {
			schc = &xhscht->sched_context_array[cpuid];
			xen_hyper_show_sched(flag, schc);
			flag |= XEN_HYPER_SCHED_1STCALL;
		}
	}
}

/*
 * Print one per-cpu scheduler line; a header is printed before the
 * first entry (or before each entry in verbose mode).
 */
static void
xen_hyper_show_sched(ulong flag, struct xen_hyper_sched_context *schc)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];

	if (!(schc->schedule_data)) {
		return;
	}
	if ((flag & XEN_HYPER_SCHED_VERBOSE) ||
	    !(flag & XEN_HYPER_SCHED_1STCALL)) {
		if ((flag & XEN_HYPER_SCHED_1STCALL) &&
		    (flag & XEN_HYPER_SCHED_VERBOSE)) {
			fprintf(fp, "\n");
		}
		sprintf(buf, " CPU ");
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-DATA");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-PRIV");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU");
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "IDL-VCPU");
		/* the tick member exists only in some hypervisor versions */
		if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) {
			strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
			mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|RJUST, "TICK");
		}
		fprintf(fp, "%s\n", buf);
	}
	sprintf(buf, "%5d ", schc->cpu_id);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(schc->schedule_data));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(schc->sched_priv));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(schc->curr));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(schc->idle));
	if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) {
		strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
		mkstring(&buf[strlen(buf)], LONG_PRLEN,
			CENTER|LONG_HEX|RJUST, MKSTR(schc->tick));
	}
	fprintf(fp, "%s\n", buf);
	if (flag & XEN_HYPER_SCHED_VERBOSE) {
		/* placeholder: no extra verbose detail implemented yet */
		;
	}
}

/*
 * Display general system info.
 */
void
xen_hyper_cmd_sys(void)
{
	int c;
	ulong sflag;

	sflag = FALSE;
	while ((c = getopt(argcnt, args, "c")) != EOF) {
		switch(c)
		{
		case 'c':	/* config display: not implemented yet */
			sflag = TRUE;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (!args[optind]) {
		if (sflag)
			fprintf(fp, "No support argument\n");
			/* display config info here. */
		else
			xen_hyper_display_sys_stats();
		return;
	}
}

/*
 * Display system stats at init-time or for the sys command.
 */
void
xen_hyper_display_sys_stats(void)
{
	struct new_utsname *uts;
	char buf1[XEN_HYPER_CMD_BUFSIZE];
	char buf2[XEN_HYPER_CMD_BUFSIZE];
	ulong mhz;
	int len, flag;

	uts = &xht->utsname;
	len = 11;	/* label column width */
	flag = XEN_HYPER_PRI_R;

	/*
	 * It's now safe to unlink the remote namelist.
	 */
	if (pc->flags & UNLINK_NAMELIST) {
		unlink(pc->namelist);
		pc->flags &= ~UNLINK_NAMELIST;
		pc->flags |= NAMELIST_UNLINKED;
	}

	if (REMOTE()) {
		switch (pc->flags &
			(NAMELIST_LOCAL|NAMELIST_UNLINKED|NAMELIST_SAVED))
		{
		case NAMELIST_UNLINKED:
			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
				(buf1, "%s (temporary)\n", pc->namelist));
			break;
		case (NAMELIST_UNLINKED|NAMELIST_SAVED):
		case NAMELIST_LOCAL:
			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
				(buf1, "%s\n", pc->namelist));
			break;
		}
	} else {
		if (pc->system_map) {
			XEN_HYPER_PRI(fp, len, "SYSTEM MAP: ", buf1, flag,
				(buf1, "%s\n", pc->system_map));
			XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag,
				(buf1, "%s\n", pc->namelist));
		} else {
			XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag,
				(buf1, "%s\n", pc->namelist));
		}
	}

	if (pc->debuginfo_file) {
		XEN_HYPER_PRI(fp, len, "DEBUGINFO: ", buf1, flag,
			(buf1, "%s\n", pc->debuginfo_file));
	} else if (pc->namelist_debug) {
		XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag,
			(buf1, "%s\n", pc->namelist_debug));
	}

	XEN_HYPER_PRI_CONST(fp, len, "DUMPFILE: ", flag);
	if (ACTIVE()) {
		if (REMOTE_ACTIVE())
			fprintf(fp, "%s@%s (remote live system)\n",
				pc->server_memsrc, pc->server);
		else
			fprintf(fp, "%s\n", pc->live_memsrc);
	} else {
		if (REMOTE_DUMPFILE())
			fprintf(fp, "%s@%s (remote dumpfile)",
				pc->server_memsrc, pc->server);
		else
			fprintf(fp, "%s", pc->dumpfile);
		fprintf(fp, "\n");
	}

	XEN_HYPER_PRI(fp, len, "CPUS: ", buf1, flag,
		(buf1, "%d\n", XEN_HYPER_NR_PCPUS()));
	XEN_HYPER_PRI(fp, len, "DOMAINS: ", buf1, flag,
		(buf1, "%d\n", XEN_HYPER_NR_DOMAINS()));
	/* !!!Display a date here if it can be found. */
	XEN_HYPER_PRI(fp, len, "UPTIME: ", buf1, flag, (buf1, "%s\n",
		(xen_hyper_get_uptime_hyper() ?
		convert_time(xen_hyper_get_uptime_hyper(), buf2) :
		"--:--:--")));
	/* !!!Display a version here if it can be found. */
	XEN_HYPER_PRI_CONST(fp, len, "MACHINE: ", flag);
	if (strlen(uts->machine)) {
		fprintf(fp, "%s ", uts->machine);
	} else {
		fprintf(fp, "unknown ");
	}
	if ((mhz = machdep->processor_speed()))
		fprintf(fp, "(%ld Mhz)\n", mhz);
	else
		fprintf(fp, "(unknown Mhz)\n");
	XEN_HYPER_PRI(fp, len, "MEMORY: ", buf1, flag,
		(buf1, "%s\n", get_memory_size(buf2)));

	if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND))
		return;
}

/*
 * Display vcpu struct.
 */
void
xen_hyper_cmd_vcpu(void)
{
	struct xen_hyper_cmd_args vca;
	struct xen_hyper_vcpu_context *vcc;
	ulong flag;
	ulong valvc, valdom;
	int c, cnt, type, bogus;

	BZERO(&vca, sizeof(struct xen_hyper_cmd_args));
	flag = 0;
	while ((c = getopt(argcnt, args, "i")) != EOF) {
		switch(c)
		{
		case 'i':	/* arguments are domain-id/vcpu-id pairs */
			flag |= XEN_HYPER_VCPUS_ID;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	cnt = bogus = 0;
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			if (flag & XEN_HYPER_VCPUS_ID) {
				/* consume two arguments: domain id, vcpu id */
				type = xen_hyper_strid_to_vcpu_context(
					args[optind], args[optind+1],
					&valdom, &valvc, &vcc);
			} else {
				type = xen_hyper_strvcpu_to_vcpu_context(
					args[optind], &valvc, &vcc);
			}
			switch (type)
			{
			case XEN_HYPER_STR_VCID:
			case XEN_HYPER_STR_VCPU:
				vca.value[cnt] = valvc;
				vca.type[cnt] = type;
				vca.addr[cnt] = vcc->vcpu;
				vca.context[cnt] = vcc;
				cnt++;
				break;
			case XEN_HYPER_STR_INVALID:
				error(INFO, "invalid vcpu or id value: %s\n\n",
					args[optind]);
				bogus++;
			}
		} else {
			error(FATAL, "invalid address: %s\n", args[optind]);
		}
		optind++;
		if (flag & XEN_HYPER_VCPUS_ID)
			optind++;
	}
	vca.cnt = cnt;

	if (bogus && !cnt) {
		return;
	}

	xen_hyper_do_vcpu(&vca);
}

/*
 * Do the work requested by xen_hyper_cmd_vcpu().
 */
static void
xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca)
{
	int i;

	if (vca->cnt) {
		if (vca->cnt == 1) {
			/* remember a single selection as the default vcpu */
			xhvct->last = vca->context[0];
		}
		for (i = 0; i < vca->cnt; i++) {
			dump_struct("vcpu", vca->addr[i], 0);
		}
	} else {
		/* no argument: dump the most recently referenced vcpu */
		dump_struct("vcpu", xhvct->last->vcpu, 0);
	}
}

/*
 * Display vcpu status.
*/
void
xen_hyper_cmd_vcpus(void)
{
	struct xen_hyper_cmd_args vca;
	struct xen_hyper_vcpu_context *vcc;
	ulong flag;
	ulong valvc, valdom;
	int c, cnt, type, bogus;

	BZERO(&vca, sizeof(struct xen_hyper_cmd_args));
	flag = 0;
	while ((c = getopt(argcnt, args, "i")) != EOF) {
		switch(c)
		{
		case 'i':	/* arguments are domain-id/vcpu-id pairs */
			flag |= XEN_HYPER_VCPUS_ID;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	cnt = bogus = 0;
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			if (flag & XEN_HYPER_VCPUS_ID) {
				/* consume two arguments: domain id, vcpu id */
				type = xen_hyper_strid_to_vcpu_context(
					args[optind], args[optind+1],
					&valdom, &valvc, &vcc);
			} else {
				type = xen_hyper_strvcpu_to_vcpu_context(
					args[optind], &valvc, &vcc);
			}
			switch (type)
			{
			case XEN_HYPER_STR_VCID:
			case XEN_HYPER_STR_VCPU:
				vca.value[cnt] = valvc;
				vca.type[cnt] = type;
				vca.addr[cnt] = vcc->vcpu;
				vca.context[cnt] = vcc;
				cnt++;
				break;
			case XEN_HYPER_STR_INVALID:
				error(INFO, "invalid vcpu or id value: %s\n\n",
					args[optind]);
				bogus++;
			}
		} else {
			error(FATAL, "invalid address: %s\n", args[optind]);
		}
		optind++;
		/*
		 * NOTE(review): in -i mode only one optind++ is done here,
		 * unlike xen_hyper_cmd_vcpu() which advances twice --
		 * verify the second id is not re-parsed as an argument.
		 */
	}
	vca.cnt = cnt;

	if (bogus && !cnt) {
		return;
	}

	xen_hyper_do_vcpus(&vca);
}

/*
 * Do the work requested by xen_hyper_cmd_vcpus().
*/
static void
xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca)
{
	struct xen_hyper_vcpu_context_array *vcca;
	struct xen_hyper_vcpu_context *vcc;
	char buf1[XEN_HYPER_CMD_BUFSIZE];
	char buf2[XEN_HYPER_CMD_BUFSIZE];
	int i, j;

	fprintf(fp, " VCID PCID %s ST T DOMID %s\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "VCPU"),
		mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN"));

	if (vca->cnt) {
		for (i = 0; i < vca->cnt; i++) {
			xen_hyper_show_vcpus(vca->context[i]);
		}
	} else {
		/* no arguments: walk every domain's vcpu context array */
		for (i = 0, vcca = xhvct->vcpu_context_arrays;
		    i < XEN_HYPER_NR_DOMAINS(); i++, vcca++) {
			for (j = 0, vcc = vcca->context_array;
			    j < vcca->context_array_valid; j++, vcc++) {
				xen_hyper_show_vcpus(vcc);
			}
		}
	}
}

/*
 * Print one vcpu status line.  ">" marks the vcpu currently running
 * on its pcpu; "*" marks the crashing vcpu.
 */
static void
xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc)
{
	int type;
	char *act, *crash;
	char buf[XEN_HYPER_CMD_BUFSIZE];
	struct xen_hyper_pcpu_context *pcc;
	domid_t domid;

	if (!(vcc->vcpu)) {
		return;
	}
	if((pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) {
		if (pcc->current_vcpu == vcc->vcpu) {
			act = ">";
		} else {
			act = " ";
		}
	} else {
		act = " ";
	}
	if (xht->crashing_vcc && vcc->vcpu == xht->crashing_vcc->vcpu) {
		crash = "*";
	} else {
		crash = " ";
	}
	sprintf(buf, "%s%s%5d %5d ", act, crash, vcc->vcpu_id, vcc->processor);
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(vcc->vcpu));
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	xen_hyper_vcpu_state_string(vcc, &buf[strlen(buf)], !VERBOSE);
	strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1);
	xen_hyper_domain_to_type(vcc->domain, &type, &buf[strlen(buf)], !VERBOSE);
	if ((domid = xen_hyper_domain_to_id(vcc->domain)) ==
	    XEN_HYPER_DOMAIN_ID_INVALID) {
		sprintf(&buf[strlen(buf)], " ????? ");
	} else {
		sprintf(&buf[strlen(buf)], " %5d ", domid);
	}
	mkstring(&buf[strlen(buf)], VADDR_PRLEN,
		CENTER|LONG_HEX|RJUST, MKSTR(vcc->domain));
	fprintf(fp, "%s\n", buf);
}

/*
 * Get string for domain status.
 * - This may need some data in domain struct.
*/
char *
xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc,
	char *buf, int verbose)
{
	ulong stat;

	stat = xen_hyper_domain_state(dc);

	if (stat == XEN_HYPER_DOMF_ERROR) {
		sprintf(buf, verbose ? "(unknown)" : "??");
	} else if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) {
		/* hypervisors with a single domain_flags bitmask (DOMF_*) */
		if (stat & XEN_HYPER_DOMF_shutdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
		} else if (stat & XEN_HYPER_DOMF_dying) {
			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
		} else if (stat & XEN_HYPER_DOMF_ctrl_pause) {
			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
		} else if (stat & XEN_HYPER_DOMF_polling) {
			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
		} else if (stat & XEN_HYPER_DOMF_paused) {
			sprintf(buf, verbose ? "DOMAIN_PAUSED" : "PA");
		} else {
			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
		}
	} else {
		/* hypervisors with separate DOMS_* state bits */
		if (stat & XEN_HYPER_DOMS_shutdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
		} else if (stat & XEN_HYPER_DOMS_shuttingdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTTINGDOWN" : "SH");
		} else if (stat & XEN_HYPER_DOMS_dying) {
			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
		} else if (stat & XEN_HYPER_DOMS_ctrl_pause) {
			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
		} else if (stat & XEN_HYPER_DOMS_polling) {
			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
		} else {
			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
		}
	}
	return buf;
}

/*
 * Get string for vcpu status.
 * - This may need some data in vcpu struct.
 */
char *
xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc,
	char *buf, int verbose)
{
	int stat;

	stat = xen_hyper_vcpu_state(vcc);

	if (stat == XEN_HYPER_RUNSTATE_ERROR) {
		sprintf(buf, verbose ? "(unknown)" : "??");
	} else if (stat == XEN_HYPER_RUNSTATE_running ||
		stat == XEN_HYPER_RUNSTATE_runnable) {
		sprintf(buf, verbose ? "VCPU_RUNNING" : "RU");
	} else if (stat == XEN_HYPER_RUNSTATE_blocked) {
		sprintf(buf, verbose ? "VCPU_BLOCKED" : "BL");
	} else if (stat == XEN_HYPER_RUNSTATE_offline) {
		sprintf(buf, verbose ? "VCPU_OFFLINE" : "OF");
	} else {
		sprintf(buf, verbose ? "(unknown)" : "??");
	}
	return buf;
}

/*
 * Get domain type from domain address.
 */
static char *
xen_hyper_domain_to_type(ulong domain, int *type, char *buf, int verbose)
{
	struct xen_hyper_domain_context *dc;

	if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
		error(WARNING, "cannot get context from domain address.\n");
		return NULL;
	}
	return xen_hyper_domain_context_to_type(dc, type, buf, verbose);
}

/*
 * Get domain type from domain context.  Fills *type and, for valid
 * contexts, a short (or verbose) type string into buf.
 */
static char *
xen_hyper_domain_context_to_type(struct xen_hyper_domain_context *dc,
	int *type, char *buf, int verbose)
{
	if (!dc) {
		*type = XEN_HYPER_DOMAIN_TYPE_INVALID;
		return NULL;
	} else if (dc->domain_id == XEN_HYPER_DOMID_IO) {
		*type = XEN_HYPER_DOMAIN_TYPE_IO;
		sprintf(buf, verbose ? "dom_io" : "O");
	} else if (dc->domain_id == XEN_HYPER_DOMID_XEN) {
		*type = XEN_HYPER_DOMAIN_TYPE_XEN;
		sprintf(buf, verbose ? "dom_xen" : "X");
	} else if (dc->domain_id == XEN_HYPER_DOMID_IDLE) {
		*type = XEN_HYPER_DOMAIN_TYPE_IDLE;
		sprintf(buf, verbose ? "idle domain" : "I");
	} else if (dc == xhdt->dom0) {
		*type = XEN_HYPER_DOMAIN_TYPE_DOM0;
		sprintf(buf, verbose ? "domain 0" : "0");
	} else {
		*type = XEN_HYPER_DOMAIN_TYPE_GUEST;
		sprintf(buf, verbose ? "domain U" : "U");
	}
	return buf;
}

/*
 * Check a type for value. And return domain context.
*/ static int xen_hyper_str_to_domain_context(char *string, ulong *value, struct xen_hyper_domain_context **dcp) { ulong dvalue, hvalue; int found, type; char *s; struct xen_hyper_domain_context *dc_did, *dc_ddc, *dc_hid, *dc_hdc; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; dc_did = dc_ddc = dc_hid = dc_hdc = NULL; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if ((dc_did = xen_hyper_id_to_domain_context(dvalue))) found++; if ((dc_ddc = xen_hyper_domain_to_domain_context(dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((dc_hid = xen_hyper_id_to_domain_context(hvalue))) found++; if ((dc_hdc = xen_hyper_domain_to_domain_context(hvalue))) found++; } switch (found) { case 2: if (dc_did && dc_hid) { *dcp = dc_did; *value = dvalue; type = STR_PID; } break; case 1: if (dc_did) { *dcp = dc_did; *value = dvalue; type = XEN_HYPER_STR_DID; } if (dc_ddc) { *dcp = dc_ddc; *value = dvalue; type = XEN_HYPER_STR_DOMAIN; } if (dc_hid) { *dcp = dc_hid; *value = hvalue; type = XEN_HYPER_STR_DID; } if (dc_hdc) { *dcp = dc_hdc; *value = hvalue; type = XEN_HYPER_STR_DOMAIN; } break; } return type; } /* * Display a vcpu context. */ void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc) { char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_pcpu_context *pcc; struct xen_hyper_domain_context *dc; int len, flag; len = 6; len += pc->flags & RUNTIME ? 
0 : 5; flag = XEN_HYPER_PRI_R; if (!(pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) { error(WARNING, "cannot get pcpu context vcpu belongs.\n"); return; } if (!(dc = xen_hyper_domain_to_domain_context(vcc->domain))) { error(WARNING, "cannot get domain context vcpu belongs.\n"); return; } XEN_HYPER_PRI(fp, len, "PCPU-ID: ", buf, flag, (buf, "%d\n", vcc->processor)); XEN_HYPER_PRI(fp, len, "PCPU: ", buf, flag, (buf, "%lx\n", pcc->pcpu)); XEN_HYPER_PRI(fp, len, "VCPU-ID: ", buf, flag, (buf, "%d\n", vcc->vcpu_id)); XEN_HYPER_PRI(fp, len, "VCPU: ", buf, flag, (buf, "%lx ", vcc->vcpu)); fprintf(fp, "(%s)\n", xen_hyper_vcpu_state_string(vcc, buf, VERBOSE)); XEN_HYPER_PRI(fp, len, "DOMAIN-ID: ", buf, flag, (buf, "%d\n", dc->domain_id)); XEN_HYPER_PRI(fp, len, "DOMAIN: ", buf, flag, (buf, "%lx ", vcc->domain)); fprintf(fp, "(%s)\n", xen_hyper_domain_state_string(dc, buf, VERBOSE)); XEN_HYPER_PRI_CONST(fp, len, "STATE: ", flag); if (machdep->flags & HWRESET) { fprintf(fp, "HARDWARE RESET"); } else if (machdep->flags & INIT) { fprintf(fp, "INIT"); } else if (xen_hyper_is_vcpu_crash(vcc)) { fprintf(fp, "CRASH"); } else { fprintf(fp, "ACTIVE"); } fprintf(fp, "\n"); } /* * Check a type for value. And return dump information context address. 
*/
static int
xen_hyper_str_to_dumpinfo_context(char *string, ulong *value,
	struct xen_hyper_dumpinfo_context **dicp)
{
	ulong dvalue, hvalue;
	struct xen_hyper_dumpinfo_context *note_did, *note_hid;
	struct xen_hyper_dumpinfo_context *note_dad, *note_had;
	int found, type;
	char *s;

	if (string == NULL) {
		error(INFO, "received NULL string\n");
		return STR_INVALID;
	}

	/* try the string both as decimal and as hexadecimal */
	s = string;
	dvalue = hvalue = BADADDR;

	if (decimal(s, 0))
		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
	if (hexadecimal(s, 0)) {
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) <= MAX_HEXADDR_STRLEN)
			hvalue = htol(s, RETURN_ON_ERROR, NULL);
	}

	found = 0;
	note_did = note_hid = note_dad = note_had = 0;
	type = XEN_HYPER_STR_INVALID;

	/*
	 * Small values are treated as pcpu ids, larger ones as ELF note
	 * addresses.  NOTE(review): the test uses ">", so a value equal
	 * to XEN_HYPER_MAX_CPUS() is looked up as a cpu id -- confirm
	 * that is the intended boundary.
	 */
	if (dvalue != BADADDR) {
		if (dvalue > XEN_HYPER_MAX_CPUS()) {
			note_dad = xen_hyper_note_to_dumpinfo_context(dvalue);
		} else {
			note_did = xen_hyper_id_to_dumpinfo_context(dvalue);
		}
		found++;
	}
	if ((hvalue != BADADDR)) {
		if (hvalue > XEN_HYPER_MAX_CPUS()) {
			note_had = xen_hyper_note_to_dumpinfo_context(hvalue);
		} else {
			note_hid = xen_hyper_id_to_dumpinfo_context(hvalue);
		}
		found++;
	}

	switch (found)
	{
	case 2:
		if (note_did && note_hid) {
			*value = dvalue;
			*dicp = note_did;
			type = XEN_HYPER_STR_PCID;
		}
		break;
	case 1:
		if (note_did) {
			*value = dvalue;
			*dicp = note_did;
			type = XEN_HYPER_STR_PCID;
		}
		if (note_hid) {
			*value = hvalue;
			*dicp = note_hid;
			type = XEN_HYPER_STR_PCID;
		}
		if (note_dad) {
			*value = dvalue;
			*dicp = note_dad;
			type = XEN_HYPER_STR_ADDR;
		}
		if (note_had) {
			*value = hvalue;
			*dicp = note_had;
			type = XEN_HYPER_STR_ADDR;
		}
		break;
	}

	return type;
}

/*
 * Check a type for value. And return vcpu context.
*/
static int
xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value,
	struct xen_hyper_vcpu_context **vccp)
{
	ulong dvalue, hvalue;
	int found, type;
	char *s;
	struct xen_hyper_vcpu_context *vcc_dvc, *vcc_hvc;

	if (string == NULL) {
		error(INFO, "received NULL string\n");
		return STR_INVALID;
	}

	/* try the string both as decimal and as hexadecimal */
	s = string;
	dvalue = hvalue = BADADDR;

	if (decimal(s, 0))
		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
	if (hexadecimal(s, 0)) {
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) <= MAX_HEXADDR_STRLEN)
			hvalue = htol(s, RETURN_ON_ERROR, NULL);
	}

	found = 0;
	vcc_dvc = vcc_hvc = NULL;
	type = XEN_HYPER_STR_INVALID;

	/* only vcpu *addresses* are accepted by this variant */
	if (dvalue != BADADDR) {
		if ((vcc_dvc = xen_hyper_vcpu_to_vcpu_context(dvalue)))
			found++;
	}
	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
		if ((vcc_hvc = xen_hyper_vcpu_to_vcpu_context(hvalue)))
			found++;
	}

	switch (found)
	{
	case 1:
		if (vcc_dvc) {
			*vccp = vcc_dvc;
			*value = dvalue;
			type = XEN_HYPER_STR_VCPU;
		}
		if (vcc_hvc) {
			*vccp = vcc_hvc;
			*value = hvalue;
			type = XEN_HYPER_STR_VCPU;
		}
		break;
	}

	return type;
}

/*
 * Check a type for id value. And return vcpu context.
*/
static int
xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc, ulong *valdom,
	ulong *valvc, struct xen_hyper_vcpu_context **vccp)
{
	ulong dvalue, hvalue;
	int found, type;
	char *s;
	struct xen_hyper_vcpu_context *vcc_did, *vcc_hid;
	struct xen_hyper_domain_context *dc;

	if (strdom == NULL || strvc == NULL) {
		error(INFO, "received NULL string\n");
		return STR_INVALID;
	}

	/* first resolve the domain argument */
	if (xen_hyper_str_to_domain_context(strdom, valdom, &dc) ==
		XEN_HYPER_STR_INVALID) {
		error(INFO, "invalid domain id string.\n");
		return STR_INVALID;
	}

	/* then try the vcpu-id argument both as decimal and hexadecimal */
	s = strvc;
	dvalue = hvalue = BADADDR;

	if (decimal(s, 0))
		dvalue = dtol(s, RETURN_ON_ERROR, NULL);
	if (hexadecimal(s, 0)) {
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) <= MAX_HEXADDR_STRLEN)
			hvalue = htol(s, RETURN_ON_ERROR, NULL);
	}

	found = 0;
	vcc_did = vcc_hid = NULL;
	type = XEN_HYPER_STR_INVALID;

	if (dvalue != BADADDR) {
		if ((vcc_did = xen_hyper_id_to_vcpu_context(dc->domain,
			XEN_HYPER_DOMAIN_ID_INVALID, dvalue)))
			found++;
	}
	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
		if ((vcc_hid = xen_hyper_id_to_vcpu_context(dc->domain,
			XEN_HYPER_DOMAIN_ID_INVALID, hvalue)))
			found++;
	}

	switch (found)
	{
	case 2:
		/* ambiguous: both interpretations valid, prefer decimal */
		if (vcc_did && vcc_hid) {
			*vccp = vcc_did;
			*valvc = dvalue;
			type = XEN_HYPER_STR_VCID;
		}
		break;
	case 1:
		if (vcc_did) {
			*vccp = vcc_did;
			*valvc = dvalue;
			type = XEN_HYPER_STR_VCID;
		}
		if (vcc_hid) {
			*vccp = vcc_hid;
			*valvc = hvalue;
			type = XEN_HYPER_STR_VCID;
		}
		break;
	}

	return type;
}

/*
 * Check a type for value. And return pcpu context.
*/ static int xen_hyper_str_to_pcpu_context(char *string, ulong *value, struct xen_hyper_pcpu_context **pccp) { ulong dvalue, hvalue; int found, type; char *s; struct xen_hyper_pcpu_context *pcc_did, *pcc_dpc, *pcc_hid, *pcc_hpc; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; pcc_did = pcc_dpc = pcc_hid = pcc_hpc = NULL; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if ((pcc_did = xen_hyper_id_to_pcpu_context(dvalue))) found++; if ((pcc_dpc = xen_hyper_pcpu_to_pcpu_context(dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((pcc_hid = xen_hyper_id_to_pcpu_context(hvalue))) found++; if ((pcc_hpc = xen_hyper_pcpu_to_pcpu_context(hvalue))) found++; } switch (found) { case 2: if (pcc_did && pcc_hid) { *pccp = pcc_did; *value = dvalue; type = STR_PID; } break; case 1: if (pcc_did) { *pccp = pcc_did; *value = dvalue; type = XEN_HYPER_STR_PCID; } if (pcc_dpc) { *pccp = pcc_dpc; *value = dvalue; type = XEN_HYPER_STR_PCPU; } if (pcc_hid) { *pccp = pcc_hid; *value = hvalue; type = XEN_HYPER_STR_PCID; } if (pcc_hpc) { *pccp = pcc_hpc; *value = hvalue; type = XEN_HYPER_STR_PCPU; } break; } return type; } #endif crash-7.1.4/xendump.c0000664000000000000000000022731212634305150013155 0ustar rootroot/* * xendump.c * * Copyright (C) 2006-2011, 2013-2014 David Anderson * Copyright (C) 2006-2011, 2013-2014 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "xendump.h" static struct xendump_data xendump_data = { 0 }; static struct xendump_data *xd = &xendump_data; static int xc_save_verify(char *); static int xc_core_verify(char *, char *); static int xc_save_read(void *, int, ulong, physaddr_t); static int xc_core_read(void *, int, ulong, physaddr_t); static int xc_core_mfns(ulong, FILE *); static void poc_store(ulong, off_t); static off_t poc_get(ulong, int *); static void xen_dump_vmconfig(FILE *); static void xc_core_create_pfn_tables(void); static ulong xc_core_pfn_to_page_index(ulong); static int xc_core_pfn_valid(ulong); static void xendump_print(char *fmt, ...); static int xc_core_elf_verify(char *, char *); static void xc_core_elf_dump(void); static char *xc_core_elf_mfn_to_page(ulong, char *); static int xc_core_elf_mfn_to_page_index(ulong); static ulong xc_core_elf_pfn_valid(ulong); static ulong xc_core_elf_pfn_to_page_index(ulong); static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *); static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *); static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int); static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int); static char *xc_core_strtab(uint32_t, char *); static void xc_core_dump_elfnote(off_t, size_t, int); static void xc_core_elf_pfn_init(void); #define ELFSTORE 1 #define ELFREAD 0 /* * Determine whether a file is a xendump creation, and if TRUE, * initialize the xendump_data structure. 
*/ int is_xendump(char *file) { int verified; char buf[BUFSIZE]; if ((xd->xfd = open(file, O_RDWR)) < 0) { if ((xd->xfd = open(file, O_RDONLY)) < 0) { sprintf(buf, "%s: open", file); perror(buf); return FALSE; } } if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) return FALSE; if (machine_type("X86") || machine_type("X86_64")) xd->page_size = 4096; else if (machine_type("IA64") && !machdep->pagesize) xd->page_size = 16384; else xd->page_size = machdep->pagesize; verified = xc_save_verify(buf) || xc_core_verify(file, buf); if (!verified) close(xd->xfd); return (verified); } /* * Verify whether the dump was created by the xc_domain_dumpcore() * library function in libxc/xc_core.c. */ static int xc_core_verify(char *file, char *buf) { struct xc_core_header *xcp; xcp = (struct xc_core_header *)buf; if (xc_core_elf_verify(file, buf)) return TRUE; if ((xcp->xch_magic != XC_CORE_MAGIC) && (xcp->xch_magic != XC_CORE_MAGIC_HVM)) return FALSE; if (!xcp->xch_nr_vcpus) { error(INFO, "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n"); fprintf(stderr, " xch_magic: %x\n", xcp->xch_magic); fprintf(stderr, " xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus); fprintf(stderr, " xch_nr_pages: %d\n", xcp->xch_nr_pages); fprintf(stderr, " xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset); fprintf(stderr, " xch_index_offset: %d\n", xcp->xch_index_offset); fprintf(stderr, " xch_pages_offset: %d\n\n", xcp->xch_pages_offset); clean_exit(1); } xd->xc_core.header.xch_magic = xcp->xch_magic; xd->xc_core.header.xch_nr_vcpus = xcp->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = xcp->xch_nr_pages; xd->xc_core.header.xch_ctxt_offset = (off_t)xcp->xch_ctxt_offset; xd->xc_core.header.xch_index_offset = (off_t)xcp->xch_index_offset; xd->xc_core.header.xch_pages_offset = (off_t)xcp->xch_pages_offset; xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE); if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr)) xd->flags |= XC_CORE_64BIT_HOST; if (!xd->page_size) error(FATAL, "unknown page size: use -p command 
line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; } /* * Do the work for read_xendump() for the XC_CORE dumpfile format. */ static int xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; int redundant; if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE)) xc_core_create_pfn_tables(); pfn = (ulong)BTOP(paddr); if ((offset = poc_get(pfn, &redundant))) { if (!redundant) { if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; xd->last_pfn = pfn; } BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } if ((page_index = xc_core_pfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; poc_store(pfn, offset); BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } /* * Verify whether the dumpfile was created by the "xm save" facility. * This gets started by the "save" function in XendCheckpoint.py, and * then by xc_save.c, with the work done in the xc_linux_save() library * function in libxc/xc_linux_save.c. */ #define MAX_BATCH_SIZE 1024 /* * Number of P2M entries in a page. */ #define ULPP (xd->page_size/sizeof(unsigned long)) /* * Number of P2M entries in the pfn_to_mfn_frame_list. */ #define P2M_FL_ENTRIES (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP) /* * Size in bytes of the pfn_to_mfn_frame_list. 
*/ #define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long)) #define XTAB (0xf<<28) /* invalid page */ #define LTAB_MASK XTAB static int xc_save_verify(char *buf) { int i, batch_count, done_batch, *intptr; ulong flags, *ulongptr; ulong batch_index, total_pages_read; ulong N; if (!STRNEQ(buf, XC_SAVE_SIGNATURE)) return FALSE; if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1) return FALSE; flags = XC_SAVE; if (CRASHDEBUG(1)) { fprintf(stderr, "\"%s\"\n", buf); fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" : (__BYTE_ORDER == __LITTLE_ENDIAN ? "__LITTLE_ENDIAN" : "???")); } /* * size of vmconfig data structure (big-endian) */ if (read(xd->xfd, buf, sizeof(int)) != sizeof(int)) return FALSE; intptr = (int *)buf; if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) { fprintf(stderr, "byte-swap required for this:\n"); for (i = 0; i < sizeof(int); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %x -> ", *intptr); } xd->xc_save.vmconfig_size = swab32(*intptr); if (CRASHDEBUG(1)) fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size); if (!(xd->xc_save.vmconfig_buf = (char *)malloc (xd->xc_save.vmconfig_size))) error(FATAL, "cannot malloc xc_save vmconfig space."); if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (!(xd->xc_save.region_pfn_type = (ulong *)calloc (MAX_BATCH_SIZE, sizeof(ulong)))) error(FATAL, "cannot malloc region_pfn_type\n"); if (read(xd->xfd, xd->xc_save.vmconfig_buf, xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size) goto xc_save_bailout; /* * nr_pfns (native byte order) */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; ulongptr 
= (ulong *)buf; if (CRASHDEBUG(1)) { for (i = 0; i < sizeof(ulong); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr); } xd->xc_save.nr_pfns = *ulongptr; if (machine_type("IA64")) goto xc_save_ia64; /* * Get a local copy of the live_P2M_frame_list */ if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) error(FATAL, "cannot allocate p2m_frame_list array"); if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, sizeof(off_t)))) error(FATAL, "cannot allocate batch_offsets array"); xd->xc_save.batch_count = P2M_FL_ENTRIES; if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != P2M_FL_SIZE) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "pre-batch file pointer: %lld\n", (ulonglong)lseek(xd->xfd, 0L, SEEK_CUR)); /* * ... * int batch_count * ulong region pfn_type[batch_count] * page 0 * page 1 * ... * page batch_count-1 * (repeat) */ total_pages_read = 0; batch_index = 0; done_batch = FALSE; while (!done_batch) { xd->xc_save.batch_offsets[batch_index] = (off_t) lseek(xd->xfd, 0L, SEEK_CUR); if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "batch[%ld]: %d ", batch_index, batch_count); batch_index++; if (batch_index >= P2M_FL_ENTRIES) { fprintf(stderr, "more than %ld batches encountered?\n", P2M_FL_ENTRIES); goto xc_save_bailout; } switch (batch_count) { case 0: if (CRASHDEBUG(1)) { fprintf(stderr, ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", total_pages_read, P2M_FL_ENTRIES); } done_batch = TRUE; continue; case -1: if (CRASHDEBUG(1)) fprintf(stderr, ": Entering page verify mode\n"); continue; default: if (batch_count > MAX_BATCH_SIZE) { if (CRASHDEBUG(1)) fprintf(stderr, ": Max batch size exceeded. 
Giving up.\n"); done_batch = TRUE; continue; } if (CRASHDEBUG(1)) fprintf(stderr, "\n"); break; } if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != batch_count * sizeof(ulong)) goto xc_save_bailout; for (i = 0; i < batch_count; i++) { unsigned long pagetype; unsigned long pfn; pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; if (pagetype == XTAB) /* a bogus/unmapped page: skip it */ continue; if (pfn > xd->xc_save.nr_pfns) { if (CRASHDEBUG(1)) fprintf(stderr, "batch_count: %d pfn %ld out of range", batch_count, pfn); } if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; total_pages_read++; } } /* * Get the list of PFNs that are not in the psuedo-phys map */ if (read(xd->xfd, &xd->xc_save.pfns_not, sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", xd->xc_save.pfns_not); if ((total_pages_read + xd->xc_save.pfns_not) != xd->xc_save.nr_pfns) error(WARNING, "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n", xd->xc_save.nr_pfns, total_pages_read, xd->xc_save.pfns_not); xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR); if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1) goto xc_save_bailout; xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR); lseek(xd->xfd, 0, SEEK_END); lseek(xd->xfd, -((off_t)(xd->page_size)), SEEK_CUR); xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR); xd->flags |= (XENDUMP_LOCAL | flags); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_ia64: /* * Completely different format for ia64: * * ... * pfn # * page data * pfn # * page data * ... 
*/ free(xd->poc); xd->poc = NULL; free(xd->xc_save.region_pfn_type); xd->xc_save.region_pfn_type = NULL; if (!(xd->xc_save.ia64_page_offsets = (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) error(FATAL, "cannot allocate ia64_page_offsets array"); /* * version */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; xd->xc_save.ia64_version = *((ulong *)buf); if (CRASHDEBUG(1)) fprintf(stderr, "ia64 version: %lx\n", xd->xc_save.ia64_version); /* * xen_domctl_arch_setup structure */ if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != sizeof(xen_domctl_arch_setup_t)) goto xc_save_bailout; if (CRASHDEBUG(1)) { xen_domctl_arch_setup_t *setup = (xen_domctl_arch_setup_t *)buf; fprintf(stderr, "xen_domctl_arch_setup:\n"); fprintf(stderr, " flags: %lx\n", (ulong)setup->flags); fprintf(stderr, " bp: %lx\n", (ulong)setup->bp); fprintf(stderr, " maxmem: %lx\n", (ulong)setup->maxmem); fprintf(stderr, " xsi_va: %lx\n", (ulong)setup->xsi_va); fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm); } for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (read(xd->xfd, &N, sizeof(N)) != sizeof(N)) goto xc_save_bailout; if (N < xd->xc_save.nr_pfns) xd->xc_save.ia64_page_offsets[N] = lseek(xd->xfd, 0, SEEK_CUR); else error(WARNING, "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n", i, N, N, xd->xc_save.nr_pfns); if (CRASHDEBUG(1)) { if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) fprintf(stderr, "[%d]: %ld\n%s", i, N, i == 9 ? 
"...\n" : ""); } if ((N+1) >= xd->xc_save.nr_pfns) break; if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; } if (CRASHDEBUG(1)) { for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (!xd->xc_save.ia64_page_offsets[i]) N++; } fprintf(stderr, "%ld out of %ld pfns not dumped\n", N, xd->xc_save.nr_pfns); } xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_bailout: error(INFO, "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n"); if (xd->xc_save.p2m_frame_list) { free(xd->xc_save.p2m_frame_list); xd->xc_save.p2m_frame_list = NULL; } if (xd->xc_save.batch_offsets) { free(xd->xc_save.batch_offsets); xd->xc_save.batch_offsets = NULL; } if (xd->xc_save.vmconfig_buf) { free(xd->xc_save.vmconfig_buf); xd->xc_save.vmconfig_buf = NULL; } if (xd->page) { free(xd->page); xd->page = NULL; } return FALSE; } /* * Do the work for read_xendump() for the XC_SAVE dumpfile format. 
 */

/*
 * Read cnt bytes of the page containing paddr into bufptr.
 * Returns cnt on success, or SEEK_ERROR/READ_ERROR on failure.
 */
static int 
xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	int b, i, redundant;
	ulong reqpfn;
	int batch_count;
	off_t file_offset;

	reqpfn = (ulong)BTOP(paddr);

	if (CRASHDEBUG(8))
	    fprintf(xd->ofp, 
		"xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n",
		(ulong)bufptr, cnt, addr, (ulonglong)paddr, reqpfn, reqpfn);

	if (xd->flags & XC_SAVE_IA64) {
		/*
		 * ia64: per-pfn file offsets were precomputed into
		 * ia64_page_offsets[] by xc_save_verify(); a zero
		 * offset means the pfn was not dumped.
		 */
		if (reqpfn >= xd->xc_save.nr_pfns) {
			if (CRASHDEBUG(1))
			    fprintf(xd->ofp, 
				"xc_save_read: pfn %lx too large: nr_pfns: %lx\n",
				reqpfn, xd->xc_save.nr_pfns);
			return SEEK_ERROR;
		}

		file_offset = xd->xc_save.ia64_page_offsets[reqpfn];
		if (!file_offset) {
			if (CRASHDEBUG(1))
			    fprintf(xd->ofp, 
				"xc_save_read: pfn %lx not stored in xendump\n",
				reqpfn);
			return SEEK_ERROR;
		}

		/*
		 * If it's the same pfn as the previous call, xd->page
		 * already holds its contents -- skip the seek/read.
		 */
		if (reqpfn != xd->last_pfn) {
			if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
				return SEEK_ERROR;
			if (read(xd->xfd, xd->page, xd->page_size) !=
			    xd->page_size)
				return READ_ERROR;
		} else {
			xd->redundant++;
			xd->cache_hits++;
		}
		xd->accesses++;
		xd->last_pfn = reqpfn;

		BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
		return cnt;
	}

	/*
	 * Fast path: this pfn's file offset is in the pfn_offset_cache,
	 * or (redundant) it is the same pfn as the last read, in which
	 * case xd->page still holds its contents.
	 */
	if ((file_offset = poc_get(reqpfn, &redundant))) {
		if (!redundant) {
			if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
				return SEEK_ERROR;
			if (read(xd->xfd, xd->page, xd->page_size) !=
			    xd->page_size)
				return READ_ERROR;
			xd->last_pfn = reqpfn;
		} else if (CRASHDEBUG(1))
			console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn);
		BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
		return cnt;
	}

        /*
         * Slow path: walk each batch recorded by xc_save_verify():
         *
         *    int batch_count
         *    ulong region pfn_type[batch_count]
         *    page 0
         *    page 1
         *    ...
         *    page batch_count-1
         *    (repeat)
         */
	for (b = 0; b < xd->xc_save.batch_count; b++) {
		if (lseek(xd->xfd, xd->xc_save.batch_offsets[b], 
		    SEEK_SET) == -1)
			return SEEK_ERROR;

		if (CRASHDEBUG(8))
		    fprintf(xd->ofp, "check batch[%d]: offset: %llx\n", 
			b, (ulonglong)xd->xc_save.batch_offsets[b]);

		if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int))
			return READ_ERROR;

		switch (batch_count)
		{
		case 0:
			/* terminating batch: the pfn was never found */
			if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) {
				fprintf(xd->ofp, 
				    "batch[%d]: has count of zero -- bailing out on pfn %ld\n",
					b, reqpfn);
			}
			return READ_ERROR;
		case -1:
			/* page-verify marker (see xc_save_verify) */
			return READ_ERROR;
		default:
			if (CRASHDEBUG(8))
			    fprintf(xd->ofp, 
				"batch[%d]: offset: %llx batch count: %d\n",
				b, (ulonglong)xd->xc_save.batch_offsets[b],
				batch_count);
			break;
		}

		if (read(xd->xfd, xd->xc_save.region_pfn_type, 
		    batch_count * sizeof(ulong)) != 
		    batch_count * sizeof(ulong))
			return READ_ERROR;

		for (i = 0; i < batch_count; i++) {
			unsigned long pagetype;
			unsigned long pfn;

			pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK;
			pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK;

			if (pagetype == XTAB)
				/* a bogus/unmapped page: skip it */
				continue;

			/* NOTE(review): out-of-range pfn only warns; the
			   entry is still compared against reqpfn below */
			if (pfn > xd->xc_save.nr_pfns) {
				if (CRASHDEBUG(1))
					fprintf(stderr, 
					    "batch_count: %d pfn %ld out of range",
						batch_count, pfn);
			}

			if (pfn == reqpfn) {
				/* found it: cache its offset for next time */
				file_offset = lseek(xd->xfd, 0, SEEK_CUR);
				poc_store(pfn, file_offset);

				if (read(xd->xfd, xd->page, xd->page_size) !=
				    xd->page_size)
					return READ_ERROR;

				BCOPY(xd->page + PAGEOFFSET(paddr), 
					bufptr, cnt);
				return cnt;
			}

			if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1)
				return SEEK_ERROR;
		}
	}

	return READ_ERROR;
}

/*
 * Stash a pfn's offset.  If they're all in use, put it in the
 * least-used slot that's closest to the beginning of the array.
*/ static void poc_store(ulong pfn, off_t file_offset) { int i; struct pfn_offset_cache *poc, *plow; ulong curlow; curlow = ~(0UL); plow = NULL; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt == 0) { poc->cnt = 1; poc->pfn = pfn; poc->file_offset = file_offset; xd->last_pfn = pfn; return; } if (poc->cnt < curlow) { curlow = poc->cnt; plow = poc; } } plow->cnt = 1; plow->pfn = pfn; plow->file_offset = file_offset; xd->last_pfn = pfn; } /* * Check whether a pfn's offset has been cached. */ static off_t poc_get(ulong pfn, int *redundant) { int i; struct pfn_offset_cache *poc; xd->accesses++; if (pfn == xd->last_pfn) { xd->redundant++; *redundant = TRUE; return 1; } else *redundant = FALSE; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt && (poc->pfn == pfn)) { poc->cnt++; xd->cache_hits++; return poc->file_offset; } } return 0; } /* * Perform any post-dumpfile determination stuff here. */ int xendump_init(char *unused, FILE *fptr) { if (!XENDUMP_VALID()) return FALSE; xd->ofp = fptr; return TRUE; } int read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (pc->curcmd_flags & XEN_MACHINE_ADDR) return READ_ERROR; switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF)) { case XC_SAVE: return xc_save_read(bufptr, cnt, addr, paddr); case XC_CORE_ORIG: case XC_CORE_ELF: return xc_core_read(bufptr, cnt, addr, paddr); default: return READ_ERROR; } } int read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; pfn = (ulong)BTOP(paddr); /* ODA: pfn == mfn !!! 
*/ if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } int write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return WRITE_ERROR; } uint xendump_page_size(void) { if (!XENDUMP_VALID()) return 0; return xd->page_size; } /* * xendump_free_memory(), and xendump_memory_used() * are debug only, and typically unnecessary to implement. */ int xendump_free_memory(void) { return 0; } int xendump_memory_used(void) { return 0; } /* * This function is dump-type independent, used here to * to dump the xendump_data structure contents. */ int xendump_memory_dump(FILE *fp) { int i, linefeed, used, others; ulong *ulongptr; Elf32_Off offset32; Elf64_Off offset64; FILE *fpsave; fprintf(fp, " flags: %lx (", xd->flags); others = 0; if (xd->flags & XENDUMP_LOCAL) fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : ""); if (xd->flags & XC_SAVE) fprintf(fp, "%sXC_SAVE", others++ ? "|" : ""); if (xd->flags & XC_CORE_ORIG) fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : ""); if (xd->flags & XC_CORE_ELF) fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : ""); if (xd->flags & XC_CORE_P2M_CREATE) fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_PFN_CREATE) fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_NO_P2M) fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : ""); if (xd->flags & XC_SAVE_IA64) fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : ""); if (xd->flags & XC_CORE_64BIT_HOST) fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " xfd: %d\n", xd->xfd); fprintf(fp, " page_size: %d\n", xd->page_size); fprintf(fp, " ofp: %lx\n", (ulong)xd->ofp); fprintf(fp, " page: %lx\n", (ulong)xd->page); fprintf(fp, " panic_pc: %lx\n", xd->panic_pc); fprintf(fp, " panic_sp: %lx\n", xd->panic_sp); fprintf(fp, " accesses: %ld\n", (ulong)xd->accesses); fprintf(fp, " cache_hits: %ld ", (ulong)xd->cache_hits); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses); else fprintf(fp, "\n"); fprintf(fp, " last_pfn: %ld\n", xd->last_pfn); fprintf(fp, " redundant: %ld ", (ulong)xd->redundant); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses); else fprintf(fp, "\n"); for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) if (xd->poc && xd->poc[i].cnt) used++; if (xd->poc) fprintf(fp, " poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, (ulong)xd->poc, xd->poc ? "" : "(none)"); else fprintf(fp, " poc[0]: (unused)\n"); for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) { if (!xd->poc) break; if (!xd->poc[i].cnt) { if (!i) fprintf(fp, "(none used)\n"); break; } else if (!i) fprintf(fp, "(%d used)\n", used); if (CRASHDEBUG(2)) fprintf(fp, " [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n", i, xd->poc[i].pfn, xd->poc[i].pfn, xd->poc[i].cnt, (ulonglong)xd->poc[i].file_offset); } if (!xd->poc) fprintf(fp, "\n"); fprintf(fp, "\n xc_save:\n"); fprintf(fp, " nr_pfns: %ld (0x%lx)\n", xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); fprintf(fp, " vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, xd->xc_save.vmconfig_size); fprintf(fp, " vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf); if (xd->flags & XC_SAVE) xen_dump_vmconfig(fp); fprintf(fp, " p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list); if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) { fprintf(fp, "\n"); ulongptr = xd->xc_save.p2m_frame_list; for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++) fprintf(fp, "%ld ", *ulongptr); fprintf(fp, "\n"); } else 
fprintf(fp, "(none)\n"); fprintf(fp, " pfns_not: %d\n", xd->xc_save.pfns_not); fprintf(fp, " pfns_not_offset: %lld\n", (ulonglong)xd->xc_save.pfns_not_offset); fprintf(fp, " vcpu_ctxt_offset: %lld\n", (ulonglong)xd->xc_save.vcpu_ctxt_offset); fprintf(fp, " shared_info_page_offset: %lld\n", (ulonglong)xd->xc_save.shared_info_page_offset); fprintf(fp, " region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type); fprintf(fp, " batch_count: %ld\n", (ulong)xd->xc_save.batch_count); fprintf(fp, " batch_offsets: %lx %s\n", (ulong)xd->xc_save.batch_offsets, xd->xc_save.batch_offsets ? "" : "(none)"); for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) { fprintf(fp, "[%d]: %llx ", i, (ulonglong)xd->xc_save.batch_offsets[i]); if (((i+1)%4) == 0) { fprintf(fp, "\n"); linefeed = FALSE; } else linefeed = TRUE; } if (linefeed) fprintf(fp, "\n"); fprintf(fp, " ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version); fprintf(fp, " ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets); if (xd->xc_save.ia64_page_offsets) fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns); else fprintf(fp, "(none)\n\n"); fprintf(fp, " xc_core:\n"); fprintf(fp, " header:\n"); fprintf(fp, " xch_magic: %x ", xd->xc_core.header.xch_magic); if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC) fprintf(fp, "(XC_CORE_MAGIC)\n"); else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) fprintf(fp, "(XC_CORE_MAGIC_HVM)\n"); else fprintf(fp, "(unknown)\n"); fprintf(fp, " xch_nr_vcpus: %d\n", xd->xc_core.header.xch_nr_vcpus); fprintf(fp, " xch_nr_pages: %d (0x%x)\n", xd->xc_core.header.xch_nr_pages, xd->xc_core.header.xch_nr_pages); fprintf(fp, " xch_ctxt_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_ctxt_offset, (ulonglong)xd->xc_core.header.xch_ctxt_offset); fprintf(fp, " xch_index_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_index_offset, (ulonglong)xd->xc_core.header.xch_index_offset); fprintf(fp, " xch_pages_offset: %llu (0x%llx)\n", 
(ulonglong)xd->xc_core.header.xch_pages_offset, (ulonglong)xd->xc_core.header.xch_pages_offset); fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" : xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a"); fprintf(fp, " elf_strtab_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.elf_strtab_offset, (ulonglong)xd->xc_core.elf_strtab_offset); fprintf(fp, " format_version: %016llx\n", (ulonglong)xd->xc_core.format_version); fprintf(fp, " shared_info_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.shared_info_offset, (ulonglong)xd->xc_core.shared_info_offset); if (machine_type("IA64")) fprintf(fp, " ia64_mapped_regs_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.ia64_mapped_regs_offset, (ulonglong)xd->xc_core.ia64_mapped_regs_offset); fprintf(fp, " elf_index_pfn[%d]: %s", INDEX_PFN_COUNT, xd->xc_core.elf_class ? "\n" : "(none used)\n"); if (xd->xc_core.elf_class) { for (i = 0; i < INDEX_PFN_COUNT; i++) { fprintf(fp, "%ld:%ld ", xd->xc_core.elf_index_pfn[i].index, xd->xc_core.elf_index_pfn[i].pfn); } fprintf(fp, "\n"); } fprintf(fp, " last_batch:\n"); fprintf(fp, " index: %ld (%ld - %ld)\n", xd->xc_core.last_batch.index, xd->xc_core.last_batch.start, xd->xc_core.last_batch.end); fprintf(fp, " accesses: %ld\n", xd->xc_core.last_batch.accesses); fprintf(fp, " duplicates: %ld ", xd->xc_core.last_batch.duplicates); if (xd->xc_core.last_batch.accesses) fprintf(fp, "(%ld%%)\n", xd->xc_core.last_batch.duplicates * 100 / xd->xc_core.last_batch.accesses); else fprintf(fp, "\n"); fprintf(fp, " elf32: %lx\n", (ulong)xd->xc_core.elf32); fprintf(fp, " elf64: %lx\n", (ulong)xd->xc_core.elf64); fprintf(fp, " p2m_frames: %d\n", xd->xc_core.p2m_frames); fprintf(fp, " p2m_frame_index_list: %s\n", (xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : ""); for (i = 0; i < xd->xc_core.p2m_frames; i++) { fprintf(fp, "%ld ", xd->xc_core.p2m_frame_index_list[i]); } fprintf(fp, xd->xc_core.p2m_frames ? 
"\n" : ""); if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8)) xc_core_mfns(XENDUMP_LOCAL, fp); switch (xd->xc_core.elf_class) { case ELFCLASS32: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset32 = xd->xc_core.elf32->e_shoff; for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { xc_core_dump_Elf32_Shdr(offset32, ELFREAD); offset32 += xd->xc_core.elf32->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; case ELFCLASS64: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset64 = xd->xc_core.elf64->e_shoff; for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { xc_core_dump_Elf64_Shdr(offset64, ELFREAD); offset64 += xd->xc_core.elf64->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; } return 0; } static void xen_dump_vmconfig(FILE *fp) { int i, opens, closes; char *p; opens = closes = 0; p = xd->xc_save.vmconfig_buf; for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) { if (ascii(*p)) fprintf(fp, "%c", *p); else fprintf(fp, "<%x>", *p); if (*p == '(') opens++; else if (*p == ')') closes++; } fprintf(fp, "\n"); if (opens != closes) error(WARNING, "invalid vmconfig contents?\n"); } /* * Looking at the active set, try to determine who panicked, * or who was the "suspend" kernel thread. */ ulong get_xendump_panic_task(void) { int i; ulong task; struct task_context *tc; switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) { case XC_CORE_ORIG: case XC_CORE_ELF: if (machdep->xendump_panic_task) return (machdep->xendump_panic_task((void *)xd)); break; case XC_SAVE: for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->active_set[i])) continue; tc = task_to_context(task); if (is_kernel_thread(task) && STREQ(tc->comm, "suspend")) return tc->task; } break; } return NO_TASK; } /* * Figure out the back trace hooks. 
 */

/*
 * Determine the starting pc/sp pair for a back trace.  The panic
 * task's hooks are returned directly once discovered; the XC_SAVE
 * case scans the panic task's stack for the "__do_suspend" return
 * address.  Falls back to the machdep stack-frame handler.
 */
void
get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp)
{
	int i;
	ulong *up;

	/* previously-discovered panic hooks: use them as-is */
	if ((tt->panic_task == bt->task) &&
	    (xd->panic_pc && xd->panic_sp)) {
		*pc = xd->panic_pc;
		*sp = xd->panic_sp;
		return;
	}

	switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE))
	{
	case XC_CORE_ORIG:
	case XC_CORE_ELF:
		if (machdep->get_xendump_regs)
			return (machdep->get_xendump_regs(xd, bt, pc, sp));
		break;

	case XC_SAVE:
		if (tt->panic_task != bt->task)
			break;

		/*
		 * Scan the stack for a kernel text return address
		 * inside __do_suspend; cache the pair as the panic
		 * hooks for subsequent calls.
		 */
		for (i = 0, up = (ulong *)bt->stackbuf;
		     i < LONGS_PER_STACK; i++, up++) {
			if (is_kernel_text(*up) &&
			    (STREQ(closest_symbol(*up), "__do_suspend"))) {
				*pc = *up;
				*sp = tt->flags & THREAD_INFO ?
				    bt->tc->thread_info + (i * sizeof(long)) :
				    bt->task + (i * sizeof(long));
				xd->panic_pc = *pc;
				xd->panic_sp = *sp;
				return;
			}
		}
	}

	machdep->get_stack_frame(bt, pc, sp);
}

/*
 * Farm out most of the work to the proper architecture to create
 * the p2m table.  For ELF core dumps, create the index;pfn table.
 */
static void
xc_core_create_pfn_tables(void)
{
	if (xd->flags & XC_CORE_P2M_CREATE) {
		if (!machdep->xendump_p2m_create)
			error(FATAL,
			    "xen xc_core dumpfiles not supported on this architecture");

		if (!machdep->xendump_p2m_create((void *)xd))
			error(FATAL,
			    "cannot create xen pfn-to-mfn mapping\n");
	}

	if (xd->flags & XC_CORE_ELF)
		xc_core_elf_pfn_init();

	/* creation is one-shot: clear the request flags */
	xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE);

	if (CRASHDEBUG(1))
		xendump_memory_dump(xd->ofp);
}

/*
 * Find the page index containing the mfn, and read the
 * machine page into the buffer.
 */

/*
 * Linearly scan the dumpfile's mfn index for the given mfn, then
 * read the corresponding machine page into pgbuf.  Returns pgbuf
 * on success, NULL on any seek/read failure or if the mfn is not
 * present in the index.
 */
char *
xc_core_mfn_to_page(ulong mfn, char *pgbuf)
{
	int i, b, idx, done;
	ulong tmp[MAX_BATCH_SIZE];
	off_t offset;
	size_t size;
	uint nr_pages;

	if (xd->flags & XC_CORE_ELF)
		return xc_core_elf_mfn_to_page(mfn, pgbuf);

	if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset,
	    SEEK_SET) == -1) {
		error(INFO, "cannot lseek to page index\n");
		return NULL;
	}

	nr_pages = xd->xc_core.header.xch_nr_pages;
	/* 64-bit host index entries are twice as wide, so a scan of
	   32-bit words covers twice as many entries -- see the
	   XC_CORE_64BIT_HOST handling in xc_core_mfns() */
	if (xd->flags & XC_CORE_64BIT_HOST)
		nr_pages *= 2;

	for (b = 0, idx = -1, done = FALSE;
	     !done && (b < nr_pages);
	     b += MAX_BATCH_SIZE) {

		/* read the index in MAX_BATCH_SIZE chunks */
		size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, tmp, size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return NULL;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages) {
				done = TRUE;
				break;
			}
			if (tmp[i] == mfn) {
				idx = i+b;
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "page: found mfn 0x%lx (%ld) at index %d\n",
						mfn, mfn, idx);
				done = TRUE;
			}
		}
	}

	if (idx == -1) {
		error(INFO,
		    "cannot find mfn %ld (0x%lx) in page index\n",
			mfn, mfn);
		return NULL;
	}

	if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
	    SEEK_SET) == -1) {
		error(INFO, "cannot lseek to xch_pages_offset\n");
		return NULL;
	}

	offset = (off_t)(idx) * (off_t)xd->page_size;

	if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
		error(INFO, "cannot lseek to mfn-specified page\n");
		return NULL;
	}

	if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) {
		error(INFO, "cannot read mfn-specified page\n");
		return NULL;
	}

	return pgbuf;
}

/*
 * Find the page index containing the mfn, and read the
 * machine page into the buffer.
 */

/*
 * ELF-format variant of xc_core_mfn_to_page(): the index consists of
 * xen_dumpcore_p2m structures, matched against their gmfn member.
 * Returns pgbuf on success, NULL if the mfn is not found or a
 * read/seek fails; seek failures on the index/pages offsets are
 * FATAL here, unlike the non-ELF version.
 */
static char *
xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf)
{
	int i, b, idx, done;
	off_t offset;
	size_t size;
	uint nr_pages;
	ulong tmp;
	struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];

	offset = xd->xc_core.header.xch_index_offset;
	nr_pages = xd->xc_core.header.xch_nr_pages;

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to page index\n");

	for (b = 0, idx = -1, done = FALSE;
	     !done && (b < nr_pages);
	     b += MAX_BATCH_SIZE) {

		/* read the p2m index in MAX_BATCH_SIZE chunks */
		size = sizeof(struct xen_dumpcore_p2m) *
			MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, &p2m_batch[0], size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return NULL;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages) {
				done = TRUE;
				break;
			}

			tmp = (ulong)p2m_batch[i].gmfn;

			if (tmp == mfn) {
				idx = i+b;
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "page: found mfn 0x%lx (%ld) at index %d\n",
						mfn, mfn, idx);
				done = TRUE;
			}
		}
	}

	if (idx == -1) {
		error(INFO,
		    "cannot find mfn %ld (0x%lx) in page index\n",
			mfn, mfn);
		return NULL;
	}

	if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
	    SEEK_SET) == -1)
		error(FATAL, "cannot lseek to xch_pages_offset\n");

	offset = (off_t)(idx) * (off_t)xd->page_size;

	if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
		error(INFO, "cannot lseek to mfn-specified page\n");
		return NULL;
	}

	if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) {
		error(INFO, "cannot read mfn-specified page\n");
		return NULL;
	}

	return pgbuf;
}

/*
 * Find and return the page index containing the mfn.
 */

/*
 * Linearly scan the dumpfile's mfn index for the given mfn and
 * return its page index, or MFN_NOT_FOUND on failure.
 */
int
xc_core_mfn_to_page_index(ulong mfn)
{
	int i, b;
	ulong tmp[MAX_BATCH_SIZE];
	uint nr_pages;
	size_t size;

	if (xd->flags & XC_CORE_ELF)
		return xc_core_elf_mfn_to_page_index(mfn);

	if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset,
	    SEEK_SET) == -1) {
		error(INFO, "cannot lseek to page index\n");
		return MFN_NOT_FOUND;
	}

	nr_pages = xd->xc_core.header.xch_nr_pages;
	/* 64-bit host index entries are twice as wide, so a scan of
	   32-bit words covers twice as many entries -- see the
	   XC_CORE_64BIT_HOST handling in xc_core_mfns() */
	if (xd->flags & XC_CORE_64BIT_HOST)
		nr_pages *= 2;

	for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
		/* read the index in MAX_BATCH_SIZE chunks */
		size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, tmp, size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return MFN_NOT_FOUND;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages)
				break;

			if (tmp[i] == mfn) {
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "index: batch: %d found mfn %ld (0x%lx) at index %d\n",
						b/MAX_BATCH_SIZE, mfn, mfn,
						i+b);
				return (i+b);
			}
		}
	}

	return MFN_NOT_FOUND;
}

/*
 * Find and return the page index containing the mfn.
 */
static int
xc_core_elf_mfn_to_page_index(ulong mfn)
{
	int i, b;
	off_t offset;
	size_t size;
	uint nr_pages;
	ulong tmp;
	struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];

	offset = xd->xc_core.header.xch_index_offset;
	nr_pages = xd->xc_core.header.xch_nr_pages;

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to page index\n");

	for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) {
		/* read the p2m index in MAX_BATCH_SIZE chunks */
		size = sizeof(struct xen_dumpcore_p2m) *
			MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, &p2m_batch[0], size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return MFN_NOT_FOUND;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages)
				break;

			tmp = (ulong)p2m_batch[i].gmfn;

			if (tmp == mfn) {
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "index: batch: %d found mfn %ld (0x%lx) at index %d\n",
						b/MAX_BATCH_SIZE, mfn, mfn,
						i+b);
				return (i+b);
			}
		}
	}

	return MFN_NOT_FOUND;
}

/*
 * XC_CORE mfn-related utility function.
*/ static int xc_core_mfns(ulong arg, FILE *ofp) { int i, b; uint nr_pages; ulong tmp[MAX_BATCH_SIZE]; ulonglong tmp64[MAX_BATCH_SIZE]; size_t size; if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return FALSE; } switch (arg) { case XC_CORE_64BIT_HOST: /* * Determine whether this is a 32-bit guest xendump that * was taken on a 64-bit xen host. */ if (machine_type("X86_64") || machine_type("IA64")) return FALSE; check_next_4: if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) { error(INFO, "cannot read index pages\n"); return FALSE; } if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) || (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) || (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3])) goto check_next_4; if (CRASHDEBUG(2)) fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3]) return TRUE; else return FALSE; case XENDUMP_LOCAL: if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST)) goto show_64bit_mfns; fprintf(ofp, "xch_index_offset mfn list:\n"); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? "\n" : "", b+i); if (tmp[i] == 0xffffffff) fprintf(ofp, " INVALID"); else fprintf(ofp, " %lx", tmp[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", xd->xc_core.header.xch_nr_pages); return TRUE; show_64bit_mfns: fprintf(ofp, "xch_index_offset mfn list: %s\n", BITS32() ? 
"(64-bit mfns)" : ""); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulonglong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp64, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? "\n" : "", b+i); if (tmp64[i] == 0xffffffffffffffffULL) fprintf(ofp, " INVALID"); else fprintf(ofp, " %llx", tmp64[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages); return TRUE; default: return FALSE; } } /* * Given a normal kernel pfn, determine the page index in the dumpfile. * * - First determine which of the pages making up the * phys_to_machine_mapping[] array would contain the pfn. * - From the phys_to_machine_mapping page, determine the mfn. * - Find the mfn in the dumpfile page index. */ #define PFNS_PER_PAGE (xd->page_size/sizeof(unsigned long)) static ulong xc_core_pfn_to_page_index(ulong pfn) { ulong idx, p2m_idx, mfn_idx; ulong *up, mfn; off_t offset; /* * This function does not apply when there's no p2m * mapping and/or if this is an ELF format dumpfile. */ switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF)) { case (XC_CORE_NO_P2M|XC_CORE_ELF): return xc_core_elf_pfn_valid(pfn); case XC_CORE_NO_P2M: return(xc_core_pfn_valid(pfn) ? 
	    pfn : PFN_NOT_FOUND);

	case XC_CORE_ELF:
		return xc_core_elf_pfn_to_page_index(pfn);
	}

	/*
	 *  Non-ELF path: index into the p2m frame list, read that frame
	 *  from the dumpfile, and pull the mfn entry for this pfn.
	 */
	idx = pfn/PFNS_PER_PAGE;

	if (idx >= xd->xc_core.p2m_frames) {
		error(INFO, "pfn: %lx is too large for dumpfile\n", pfn);
		return PFN_NOT_FOUND;
	}

	p2m_idx = xd->xc_core.p2m_frame_index_list[idx];

	if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset,
	    SEEK_SET) == -1) {
		error(INFO, "cannot lseek to xch_pages_offset\n");
		return PFN_NOT_FOUND;
	}

	offset = (off_t)(p2m_idx) * (off_t)xd->page_size;

	if (lseek(xd->xfd, offset, SEEK_CUR) == -1) {
		error(INFO, "cannot lseek to pfn-specified page\n");
		return PFN_NOT_FOUND;
	}

	if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) {
		error(INFO, "cannot read pfn-specified page\n");
		return PFN_NOT_FOUND;
	}

	up = (ulong *)xd->page;
	up += (pfn%PFNS_PER_PAGE);

	mfn = *up;

	/* "search" probes invalid pfns routinely, so stay quiet for it */
	if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) {
		if (!STREQ(pc->curcmd, "search"))
			error(INFO, "cannot find mfn in page index\n");
		return PFN_NOT_FOUND;
	}

	return mfn_idx;
}

/*
 *  Search the .xen_p2m array for the target pfn, starting at a
 *  higher batch if appropriate.  This presumes that the pfns
 *  are laid out in ascending order.
 *
 *  Returns the page index on success, or PFN_NOT_FOUND.
 */
static ulong
xc_core_elf_pfn_to_page_index(ulong pfn)
{
	int i, b, start_index;
	off_t offset;
	size_t size;
	uint nr_pages;
	ulong tmp;
	struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE];

	offset = xd->xc_core.header.xch_index_offset;
	nr_pages = xd->xc_core.header.xch_nr_pages;

	/*
	 *  Initialize the start_index: reuse the last cached batch if the
	 *  pfn falls inside it, otherwise consult the coarse
	 *  elf_index_pfn[] table built by xc_core_elf_pfn_init().
	 */
	xd->xc_core.last_batch.accesses++;

	start_index = 0;
	if ((pfn >= xd->xc_core.last_batch.start) &&
	    (pfn <= xd->xc_core.last_batch.end)) {
		xd->xc_core.last_batch.duplicates++;
		start_index = xd->xc_core.last_batch.index;
	} else {
		for (i = 0; i <= INDEX_PFN_COUNT; i++) {
			if ((i == INDEX_PFN_COUNT) ||
			    (pfn < xd->xc_core.elf_index_pfn[i].pfn)) {
				if (--i < 0)
					i = 0;
				start_index = xd->xc_core.elf_index_pfn[i].index;
				break;
			}
		}
	}

	offset += (start_index * sizeof(struct xen_dumpcore_p2m));

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to page index\n");

	/* scan forward one MAX_BATCH_SIZE chunk at a time */
	for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) {
		size = sizeof(struct xen_dumpcore_p2m) *
		    MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, &p2m_batch[0], size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return PFN_NOT_FOUND;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages)
				break;

			tmp = (ulong)p2m_batch[i].pfn;

			if (tmp == pfn) {
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "index: batch: %d found pfn %ld (0x%lx) at index %d\n",
					    b/MAX_BATCH_SIZE, pfn, pfn, i+b);

				/*
				 *  Cache this batch for the next lookup,
				 *  but only if it was read in full.
				 */
				if ((b+MAX_BATCH_SIZE) < nr_pages) {
					xd->xc_core.last_batch.index = b;
					xd->xc_core.last_batch.start = p2m_batch[0].pfn;
					xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn;
				}

				return (i+b);
			}
		}
	}

	return PFN_NOT_FOUND;
}

/*
 *  In xendumps containing INVALID_MFN markers in the page index,
 *  return the validity of the pfn.
 */
static int
xc_core_pfn_valid(ulong pfn)
{
	ulong mfn;
	off_t offset;

	if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages)
		return FALSE;

	offset = xd->xc_core.header.xch_index_offset;

	/* index entry size depends upon the dumping host's word size */
	if (xd->flags & XC_CORE_64BIT_HOST)
		offset += (off_t)(pfn * sizeof(ulonglong));
	else
		offset += (off_t)(pfn * sizeof(ulong));

	/*
	 *  The lseek and read should never fail, so report
	 *  any errors unconditionally.
	 */
	if (lseek(xd->xfd, offset, SEEK_SET) == -1) {
		error(INFO,
		    "xendump: cannot lseek to page index for pfn %lx\n",
		    pfn);
		return FALSE;
	}

	if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) {
		error(INFO,
		    "xendump: cannot read index page for pfn %lx\n",
		    pfn);
		return FALSE;
	}

	/*
	 *  If it's an invalid mfn, let the caller decide whether
	 *  to display an error message (unless debugging).
	 */
	if (mfn == INVALID_MFN) {
		if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search"))
			error(INFO,
			    "xendump: pfn %lx contains INVALID_MFN\n",
			    pfn);
		return FALSE;
	}

	return TRUE;
}

/*
 *  Return the index into the .xen_pfn array containing the pfn.
 *  If not found, return PFN_NOT_FOUND.
 */
static ulong
xc_core_elf_pfn_valid(ulong pfn)
{
	int i, b, start_index;
	off_t offset;
	size_t size;
	uint nr_pages;
	ulong tmp;
	uint64_t pfn_batch[MAX_BATCH_SIZE];

	offset = xd->xc_core.header.xch_index_offset;
	nr_pages = xd->xc_core.header.xch_nr_pages;

	/*
	 *  Initialize the start_index -- same batching scheme as
	 *  xc_core_elf_pfn_to_page_index(), but over a uint64_t array.
	 */
	xd->xc_core.last_batch.accesses++;

	start_index = 0;
	if ((pfn >= xd->xc_core.last_batch.start) &&
	    (pfn <= xd->xc_core.last_batch.end)) {
		xd->xc_core.last_batch.duplicates++;
		start_index = xd->xc_core.last_batch.index;
	} else {
		for (i = 0; i <= INDEX_PFN_COUNT; i++) {
			if ((i == INDEX_PFN_COUNT) ||
			    (pfn < xd->xc_core.elf_index_pfn[i].pfn)) {
				if (--i < 0)
					i = 0;
				start_index = xd->xc_core.elf_index_pfn[i].index;
				break;
			}
		}
	}

	offset += (start_index * sizeof(uint64_t));

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to page index\n");

	for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) {
		size = sizeof(uint64_t) *
		    MIN(MAX_BATCH_SIZE, nr_pages - b);
		if (read(xd->xfd, &pfn_batch[0], size) != size) {
			error(INFO, "cannot read index page %d\n", b);
			return PFN_NOT_FOUND;
		}

		for (i = 0; i < MAX_BATCH_SIZE; i++) {
			if ((b+i) >= nr_pages)
				break;

			tmp = (ulong)pfn_batch[i];

			if (tmp == pfn) {
				if (CRASHDEBUG(4))
					fprintf(xd->ofp,
					    "index: batch: %d found pfn %ld (0x%lx) at index %d\n",
					    b/MAX_BATCH_SIZE, pfn, pfn, i+b);

				if ((b+MAX_BATCH_SIZE) < nr_pages) {
					xd->xc_core.last_batch.index = b;
					xd->xc_core.last_batch.start = (ulong)pfn_batch[0];
					xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1];
				}

				return (i+b);
			}
		}
	}

	return PFN_NOT_FOUND;
}

/*
 *  Store the panic task's stack hooks from where it was found
 *  in get_active_set_panic_task().
 */
void
xendump_panic_hook(char *stack)
{
	int i, err, argc;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	ulong value, *sp;

	if (machine_type("IA64"))  /* needs switch_stack address */
		return;

	strcpy(buf, stack);

	argc = parse_line(buf, arglist);

	/* arglist[0] is the stack address string, terminated by ':' */
	if ((value = htol(strip_ending_char(arglist[0], ':'),
	    RETURN_ON_ERROR, &err)) == BADADDR)
		return;

	/* prefer a xen_panic_event hit; fall back to the first "panic" */
	for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) {
		if (strstr(arglist[i], "xen_panic_event")) {
			if (!readmem((ulong)sp, KVADDR, &value,
			    sizeof(ulong), "xen_panic_event address",
			    RETURN_ON_ERROR))
				return;

			xd->panic_sp = (ulong)sp;
			xd->panic_pc = value;
		} else if (strstr(arglist[i], "panic") && !xd->panic_sp) {
			if (!readmem((ulong)sp, KVADDR, &value,
			    sizeof(ulong), "xen_panic_event address",
			    RETURN_ON_ERROR))
				return;

			xd->panic_sp = (ulong)sp;
			xd->panic_pc = value;
		}
	}
}

/*
 *  printf-style output to the xendump output file pointer, or to
 *  stderr when debugging before the dumpfile has been validated.
 */
static void
xendump_print(char *fmt, ...)
{
	char buf[BUFSIZE];
	va_list ap;

	if (!fmt || !strlen(fmt))
		return;

	va_start(ap, fmt);
	(void)vsnprintf(buf, BUFSIZE, fmt, ap);
	va_end(ap);

	if (xd->ofp)
		fprintf(xd->ofp, "%s", buf);
	else if (!XENDUMP_VALID() && CRASHDEBUG(7))
		fprintf(stderr, "%s", buf);
}

/*
 *  Support for xc_core ELF dumpfile format.
 */
/*
 *  Verify and initialize an xc_core ELF dumpfile: check the ELF
 *  identification, save a copy of the ELF header, walk the section
 *  headers (storing the relevant section offsets), and allocate the
 *  page buffer and pfn-to-offset cache.  Returns TRUE on success.
 */
static int
xc_core_elf_verify(char *file, char *buf)
{
	int i;
	Elf32_Ehdr *elf32;
	Elf64_Ehdr *elf64;
	Elf32_Off offset32;
	Elf64_Off offset64;
	char *eheader;
	int swap;

	eheader = buf;

	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
		goto bailout;

	/* does the dumpfile's endianness differ from this host's? */
	swap = (((eheader[EI_DATA] == ELFDATA2LSB) &&
	    (__BYTE_ORDER == __BIG_ENDIAN)) ||
	    ((eheader[EI_DATA] == ELFDATA2MSB) &&
	    (__BYTE_ORDER == __LITTLE_ENDIAN)));

	elf32 = (Elf32_Ehdr *)buf;
	elf64 = (Elf64_Ehdr *)buf;

	if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
	    (swap16(elf32->e_type, swap) == ET_CORE) &&
	    (swap32(elf32->e_version, swap) == EV_CURRENT) &&
	    (swap16(elf32->e_shnum, swap) > 0)) {
		switch (swap16(elf32->e_machine, swap))
		{
		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL, 0))
				goto bailout;
			break;
		default:
			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
				goto bailout;
			break;
		}

		if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0))
			goto bailout;

		xd->xc_core.elf_class = ELFCLASS32;
		if ((xd->xc_core.elf32 =
		    (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) {
			fprintf(stderr, "cannot malloc ELF header buffer\n");
			clean_exit(1);
		}
		BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr));

	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
	    (swap16(elf64->e_type, swap) == ET_CORE) &&
	    (swap32(elf64->e_version, swap) == EV_CURRENT) &&
	    (swap16(elf64->e_shnum, swap) > 0)) {
		switch (swap16(elf64->e_machine, swap))
		{
		case EM_IA_64:
			if (machine_type_mismatch(file, "IA64", NULL, 0))
				goto bailout;
			break;
		case EM_X86_64:
			if (machine_type_mismatch(file, "X86_64", "X86", 0))
				goto bailout;
			break;
		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL, 0))
				goto bailout;
			break;
		default:
			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
				goto bailout;
		}

		if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0))
			goto bailout;

		xd->xc_core.elf_class = ELFCLASS64;
		if ((xd->xc_core.elf64 =
		    (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) {
			fprintf(stderr, "cannot malloc ELF header buffer\n");
			clean_exit(1);
		}
		BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr));

	} else {
		if (CRASHDEBUG(1))
			error(INFO, "%s: not a xen ELF core file\n", file);
		goto bailout;
	}

	xc_core_elf_dump();

	/* walk the section headers, storing the relevant offsets (ELFSTORE) */
	switch (xd->xc_core.elf_class)
	{
	case ELFCLASS32:
		offset32 = xd->xc_core.elf32->e_shoff;
		for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) {
			xc_core_dump_Elf32_Shdr(offset32, ELFSTORE);
			offset32 += xd->xc_core.elf32->e_shentsize;
		}
		xendump_print("\n");
		break;

	case ELFCLASS64:
		offset64 = xd->xc_core.elf64->e_shoff;
		for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) {
			xc_core_dump_Elf64_Shdr(offset64, ELFSTORE);
			offset64 += xd->xc_core.elf64->e_shentsize;
		}
		xendump_print("\n");
		break;
	}

	xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF);

	/* page_size is normally set from the DUMPCORE_HEADER elfnote */
	if (!xd->page_size)
		error(FATAL,
		    "unknown page size: use -p command line option\n");

	if (!(xd->page = (char *)malloc(xd->page_size)))
		error(FATAL, "cannot malloc page space.");

	if (!(xd->poc = (struct pfn_offset_cache *)calloc
	    (PFN_TO_OFFSET_CACHE_ENTRIES,
	    sizeof(struct pfn_offset_cache))))
		error(FATAL, "cannot malloc pfn_offset_cache\n");
	xd->last_pfn = ~(0UL);

	/* mark the batch-index table unused until xc_core_elf_pfn_init() */
	for (i = 0; i < INDEX_PFN_COUNT; i++)
		xd->xc_core.elf_index_pfn[i].pfn = ~0UL;

	if (CRASHDEBUG(1))
		xendump_memory_dump(fp);

	return TRUE;

bailout:
	return FALSE;
}

/*
 *  Dump the relevant ELF header.
 */
static void
xc_core_elf_dump(void)
{
	switch (xd->xc_core.elf_class)
	{
	case ELFCLASS32:
		xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32);
		break;
	case ELFCLASS64:
		xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64);
		break;
	}
}

/*
 *  Dump the 32-bit ELF header, and grab a pointer to the strtab section.
 */
static void
xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf)
{
	char buf[BUFSIZE];
	Elf32_Off offset32;
	Elf32_Shdr shdr;

	BZERO(buf, BUFSIZE);
	BCOPY(elf->e_ident, buf, SELFMAG);

	xendump_print("\nElf32_Ehdr:\n");
	xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]);
	xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
	switch (elf->e_ident[EI_CLASS])
	{
	case ELFCLASSNONE:
		/* NOTE(review): no newline in this string -- matches the
		   64-bit variant's quirk */
		xendump_print("(ELFCLASSNONE)");
		break;
	case ELFCLASS32:
		xendump_print("(ELFCLASS32)\n");
		break;
	case ELFCLASS64:
		xendump_print("(ELFCLASS64)\n");
		break;
	case ELFCLASSNUM:
		xendump_print("(ELFCLASSNUM)\n");
		break;
	default:
		xendump_print("(?)\n");
		break;
	}

	xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
	switch (elf->e_ident[EI_DATA])
	{
	case ELFDATANONE:
		xendump_print("(ELFDATANONE)\n");
		break;
	case ELFDATA2LSB:
		xendump_print("(ELFDATA2LSB)\n");
		break;
	case ELFDATA2MSB:
		xendump_print("(ELFDATA2MSB)\n");
		break;
	case ELFDATANUM:
		xendump_print("(ELFDATANUM)\n");
		break;
	default:
		xendump_print("(?)\n");
	}

	xendump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]);
	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
		xendump_print("(EV_CURRENT)\n");
	else
		xendump_print("(?)\n");

	xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
	switch (elf->e_ident[EI_OSABI])
	{
	case ELFOSABI_SYSV:
		xendump_print("(ELFOSABI_SYSV)\n");
		break;
	case ELFOSABI_HPUX:
		xendump_print("(ELFOSABI_HPUX)\n");
		break;
	case ELFOSABI_ARM:
		xendump_print("(ELFOSABI_ARM)\n");
		break;
	case ELFOSABI_STANDALONE:
		xendump_print("(ELFOSABI_STANDALONE)\n");
		break;
	default:
		xendump_print("(?)\n");
	}

	xendump_print(" e_ident[EI_ABIVERSION]: %d\n",
	    elf->e_ident[EI_ABIVERSION]);

	xendump_print(" e_type: %d ", elf->e_type);
	switch (elf->e_type)
	{
	case ET_NONE:
		xendump_print("(ET_NONE)\n");
		break;
	case ET_REL:
		xendump_print("(ET_REL)\n");
		break;
	case ET_EXEC:
		xendump_print("(ET_EXEC)\n");
		break;
	case ET_DYN:
		xendump_print("(ET_DYN)\n");
		break;
	case ET_CORE:
		xendump_print("(ET_CORE)\n");
		break;
	case ET_NUM:
		xendump_print("(ET_NUM)\n");
		break;
	case ET_LOOS:
		xendump_print("(ET_LOOS)\n");
		break;
	case ET_HIOS:
		xendump_print("(ET_HIOS)\n");
		break;
	case ET_LOPROC:
		xendump_print("(ET_LOPROC)\n");
		break;
	case ET_HIPROC:
		xendump_print("(ET_HIPROC)\n");
		break;
	default:
		xendump_print("(?)\n");
	}

	xendump_print(" e_machine: %d ", elf->e_machine);
	switch (elf->e_machine)
	{
	case EM_386:
		xendump_print("(EM_386)\n");
		break;
	default:
		xendump_print("(unsupported)\n");
		break;
	}

	xendump_print(" e_version: %ld ", (ulong)elf->e_version);
	xendump_print("%s\n",
	    elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : "");
	xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry);
	xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff);
	xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff);
	xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags);
	xendump_print(" e_ehsize: %x\n", elf->e_ehsize);
	xendump_print(" e_phentsize: %x\n", elf->e_phentsize);
	xendump_print(" e_phnum: %x\n", elf->e_phnum);
	xendump_print(" e_shentsize: %x\n", elf->e_shentsize);
	xendump_print(" e_shnum: %x\n", elf->e_shnum);
	xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx);

	/* Determine the strtab location. */
	offset32 = elf->e_shoff +
	    (elf->e_shstrndx * elf->e_shentsize);

	if (lseek(xd->xfd, offset32, SEEK_SET) != offset32)
		error(FATAL,
		    "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n");

	/* cached for later use by xc_core_strtab() */
	xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset;
}

/*
 *  Dump the 64-bit ELF header, and grab a pointer to the strtab section.
*/ static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf) { char buf[BUFSIZE]; Elf64_Off offset64; Elf64_Shdr shdr; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); xendump_print("\nElf64_Ehdr:\n"); xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: xendump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: xendump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: xendump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: xendump_print("(ELFCLASSNUM)\n"); break; default: xendump_print("(?)\n"); break; } xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: xendump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: xendump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: xendump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: xendump_print("(ELFDATANUM)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) xendump_print("(EV_CURRENT)\n"); else xendump_print("(?)\n"); xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: xendump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: xendump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: xendump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: xendump_print("(ELFOSABI_STANDALONE)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); xendump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: xendump_print("(ET_NONE)\n"); break; case ET_REL: xendump_print("(ET_REL)\n"); break; case ET_EXEC: xendump_print("(ET_EXEC)\n"); break; case ET_DYN: xendump_print("(ET_DYN)\n"); break; case ET_CORE: xendump_print("(ET_CORE)\n"); break; case ET_NUM: 
xendump_print("(ET_NUM)\n"); break; case ET_LOOS: xendump_print("(ET_LOOS)\n"); break; case ET_HIOS: xendump_print("(ET_HIOS)\n"); break; case ET_LOPROC: xendump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: xendump_print("(ET_HIPROC)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_386: xendump_print("(EM_386)\n"); break; case EM_IA_64: xendump_print("(EM_IA_64)\n"); break; case EM_PPC64: xendump_print("(EM_PPC64)\n"); break; case EM_X86_64: xendump_print("(EM_X86_64)\n"); break; default: xendump_print("(unsupported)\n"); break; } xendump_print(" e_version: %ld ", (ulong)elf->e_version); xendump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); xendump_print(" e_ehsize: %x\n", elf->e_ehsize); xendump_print(" e_phentsize: %x\n", elf->e_phentsize); xendump_print(" e_phnum: %x\n", elf->e_phnum); xendump_print(" e_shentsize: %x\n", elf->e_shentsize); xendump_print(" e_shnum: %x\n", elf->e_shnum); xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); /* Determine the strtab location. */ offset64 = elf->e_shoff + (elf->e_shstrndx * elf->e_shentsize); if (lseek(xd->xfd, offset64, SEEK_SET) != offset64) error(FATAL, "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf32_Shdr\n"); if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) error(FATAL, "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf32_Shdr\n"); xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; } /* * Dump each 32-bit section header and the data that they reference. 
 */
static void
xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store)
{
	Elf32_Shdr shdr;
	char name[BUFSIZE];
	int i;
	char c;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n");

	xendump_print("\nElf32_Shdr:\n");
	xendump_print(" sh_name: %lx ", shdr.sh_name);
	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
	xendump_print(" sh_type: %lx ", shdr.sh_type);
	switch (shdr.sh_type)
	{
	case SHT_NULL:
		xendump_print("(SHT_NULL)\n");
		break;
	case SHT_PROGBITS:
		xendump_print("(SHT_PROGBITS)\n");
		break;
	case SHT_STRTAB:
		xendump_print("(SHT_STRTAB)\n");
		break;
	case SHT_NOTE:
		xendump_print("(SHT_NOTE)\n");
		break;
	default:
		xendump_print("\n");
		break;
	}
	xendump_print(" sh_flags: %lx\n", shdr.sh_flags);
	xendump_print(" sh_addr: %lx\n", shdr.sh_addr);
	xendump_print(" sh_offset: %lx\n", shdr.sh_offset);
	xendump_print(" sh_size: %lx\n", shdr.sh_size);
	xendump_print(" sh_link: %lx\n", shdr.sh_link);
	xendump_print(" sh_info: %lx\n", shdr.sh_info);
	xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign);
	xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize);

	/* dump the strtab contents, one name per line */
	if (STREQ(name, ".shstrtab")) {
		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset,
		    SEEK_SET) != xd->xc_core.elf_strtab_offset)
			error(FATAL,
			    "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n");

		xendump_print(" ");
		for (i = 0; i < shdr.sh_size; i++) {
			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char))
				error(FATAL,
				    "xc_core_dump_Elf32_Shdr: cannot read strtab data\n");
			if (i && !c)
				xendump_print("\n ");
			else
				xendump_print("%c", c);
		}
	}

	if (STREQ(name, ".note.Xen"))
		xc_core_dump_elfnote((off_t)shdr.sh_offset,
		    (size_t)shdr.sh_size, store);

	if (!store)
		return;

	/*
	 *  ELFSTORE pass: remember the offsets of the sections the
	 *  dumpfile readers need later.
	 */
	if (STREQ(name, ".xen_prstatus"))
		xd->xc_core.header.xch_ctxt_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_shared_info"))
		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_pfn")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
	}

	if (STREQ(name, ".xen_p2m")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= XC_CORE_P2M_CREATE;
	}

	if (STREQ(name, ".xen_pages"))
		xd->xc_core.header.xch_pages_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_ia64_mapped_regs"))
		xd->xc_core.ia64_mapped_regs_offset =
		    (off_t)shdr.sh_offset;
}

/*
 *  Dump each 64-bit section header and the data that they reference.
 */
static void
xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store)
{
	Elf64_Shdr shdr;
	char name[BUFSIZE];
	int i;
	char c;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n");

	xendump_print("\nElf64_Shdr:\n");
	xendump_print(" sh_name: %x ", shdr.sh_name);
	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
	xendump_print(" sh_type: %x ", shdr.sh_type);
	switch (shdr.sh_type)
	{
	case SHT_NULL:
		xendump_print("(SHT_NULL)\n");
		break;
	case SHT_PROGBITS:
		xendump_print("(SHT_PROGBITS)\n");
		break;
	case SHT_STRTAB:
		xendump_print("(SHT_STRTAB)\n");
		break;
	case SHT_NOTE:
		xendump_print("(SHT_NOTE)\n");
		break;
	default:
		xendump_print("\n");
		break;
	}
	xendump_print(" sh_flags: %lx\n", shdr.sh_flags);
	xendump_print(" sh_addr: %lx\n", shdr.sh_addr);
	xendump_print(" sh_offset: %lx\n", shdr.sh_offset);
	xendump_print(" sh_size: %lx\n", shdr.sh_size);
	xendump_print(" sh_link: %x\n", shdr.sh_link);
	xendump_print(" sh_info: %x\n", shdr.sh_info);
	xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign);
	xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize);

	/* dump the strtab contents, one name per line */
	if (STREQ(name, ".shstrtab")) {
		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset,
		    SEEK_SET) != xd->xc_core.elf_strtab_offset)
			error(FATAL,
			    "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n");

		xendump_print(" ");
		for (i = 0; i < shdr.sh_size; i++) {
			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char))
				error(FATAL,
				    "xc_core_dump_Elf64_Shdr: cannot read strtab data\n");
			if (i && !c)
				xendump_print("\n ");
			else
				xendump_print("%c", c);
		}
	}

	if (STREQ(name, ".note.Xen"))
		xc_core_dump_elfnote((off_t)shdr.sh_offset,
		    (size_t)shdr.sh_size, store);

	if (!store)
		return;

	/*
	 *  ELFSTORE pass: remember the offsets of the sections the
	 *  dumpfile readers need later.
	 */
	if (STREQ(name, ".xen_prstatus"))
		xd->xc_core.header.xch_ctxt_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_shared_info"))
		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_pfn")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
	}

	if (STREQ(name, ".xen_p2m")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= XC_CORE_P2M_CREATE;
	}

	if (STREQ(name, ".xen_pages"))
		xd->xc_core.header.xch_pages_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_ia64_mapped_regs"))
		xd->xc_core.ia64_mapped_regs_offset =
		    (off_t)shdr.sh_offset;
}

/*
 *  Return the string found at the specified index into
 *  the dumpfile's strtab.
 *
 *  The caller's buf must be at least BUFSIZE bytes; it is returned
 *  NULL-terminated.
 */
static char *
xc_core_strtab(uint32_t index, char *buf)
{
	off_t offset;
	int i;

	offset = xd->xc_core.elf_strtab_offset + index;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_strtab: cannot seek to Elf64_Shdr\n");

	BZERO(buf, BUFSIZE);
	i = 0;

	/* copy byte-by-byte until the terminating NULL */
	while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) {
		if (buf[i] == NULLCHAR)
			break;
		i++;
	}

	return buf;
}

/*
 *  Dump the array of elfnote structures, storing relevant info
 *  when requested during initialization.  This function is
 *  common to both 32-bit and 64-bit ELF files.
*/ static void xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store) { int i, lf, index; char *notes_buffer; struct elfnote *elfnote; ulonglong *data; struct xen_dumpcore_elfnote_header_desc *elfnote_header; struct xen_dumpcore_elfnote_format_version_desc *format_version; elfnote_header = NULL; format_version = NULL; if (!(notes_buffer = (char *)malloc(sh_size))) error(FATAL, "cannot malloc notes space."); if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset) error(FATAL, "xc_core_dump_elfnote: cannot seek to sh_offset\n"); if (read(xd->xfd, notes_buffer, sh_size) != sh_size) error(FATAL, "xc_core_dump_elfnote: cannot read elfnote data\n"); for (index = 0; index < sh_size; ) { elfnote = (struct elfnote *)¬es_buffer[index]; xendump_print(" namesz: %d\n", elfnote->namesz); xendump_print(" descz: %d\n", elfnote->descsz); xendump_print(" type: %x ", elfnote->type); switch (elfnote->type) { case XEN_ELFNOTE_DUMPCORE_NONE: xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n"); break; case XEN_ELFNOTE_DUMPCORE_HEADER: xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n"); elfnote_header = (struct xen_dumpcore_elfnote_header_desc *) (elfnote+1); break; case XEN_ELFNOTE_DUMPCORE_XEN_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n"); break; case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n"); format_version = (struct xen_dumpcore_elfnote_format_version_desc *) (elfnote+1); break; default: xendump_print("(unknown)\n"); break; } xendump_print(" name: %s\n", elfnote->name); data = (ulonglong *)(elfnote+1); for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { xendump_print("%s ", i ? 
"\n" : ""); lf++; } else lf = 0; xendump_print("%016llx ", *data++); } if (!elfnote->descsz) xendump_print(" (empty)"); xendump_print("\n"); index += sizeof(struct elfnote) + elfnote->descsz; } if (!store) return; if (elfnote_header) { xd->xc_core.header.xch_magic = elfnote_header->xch_magic; xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages; xd->page_size = elfnote_header->xch_page_size; } if (format_version) { switch (format_version->version) { case FORMAT_VERSION_0000000000000001: break; default: error(WARNING, "unsupported xen dump-core format version: %016llx\n", format_version->version); } xd->xc_core.format_version = format_version->version; } } /* * Initialize the batching list for the .xen_p2m or .xen_pfn * arrays. */ static void xc_core_elf_pfn_init(void) { int i, c, chunk; off_t offset; struct xen_dumpcore_p2m p2m; uint64_t pfn; switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) { case (XC_CORE_ELF|XC_CORE_NO_P2M): chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(uint64_t)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &pfn, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "cannot read page index %d\n", c); xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn; } break; case XC_CORE_ELF: chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(struct xen_dumpcore_p2m)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) != sizeof(struct xen_dumpcore_p2m)) error(FATAL, "cannot read page index %d\n", c); 
xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn; } break; } } struct xendump_data * get_xendump_data(void) { return (XENDUMP_VALID() ? xd : NULL); } crash-7.1.4/diskdump.c0000664000000000000000000021124612634305150013314 0ustar rootroot/* * diskdump.c * * The diskdump module optionally creates either ELF vmcore * dumpfiles, or compressed dumpfiles derived from the LKCD format. * In the case of ELF vmcore files, since they are identical to * netdump dumpfiles, the facilities in netdump.c are used. For * compressed dumpfiles, the facilities in this file are used. * * Copyright (C) 2004-2015 David Anderson * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved. * Copyright (C) 2005 FUJITSU LIMITED * Copyright (C) 2005 NEC Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "diskdump.h" #include "xen_dom0.h" #define BITMAP_SECT_LEN 4096 struct diskdump_data { char *filename; ulong flags; /* DISKDUMP_LOCAL, plus anything else... 
 */
	int	dfd;		/* dumpfile file descriptor */
	FILE	*ofp;		/* fprintf(dd->ofp, "xxx"); */
	int	machine_type;	/* machine type identifier */

	/* header */
	struct disk_dump_header		*header;
	struct disk_dump_sub_header	*sub_header;
	struct kdump_sub_header		*sub_header_kdump;

	unsigned long long	max_mapnr;	/* 64bit max_mapnr */

	size_t	data_offset;
	int	block_size;
	int	block_shift;
	char	*bitmap;
	off_t	bitmap_len;
	char	*dumpable_bitmap;
	int	byte, bit;
	char	*compressed_page;	/* copy of compressed page data */
	char	*curbufptr;		/* ptr to uncompressed page buffer */
	unsigned char *notes_buf;	/* copy of elf notes */
	void	**nt_prstatus_percpu;
	uint	num_prstatus_notes;
	void	**nt_qemu_percpu;
	uint	num_qemu_notes;

	/* page cache */
	struct page_cache_hdr {		/* header for each cached page */
		uint32_t pg_flags;
		uint64_t pg_addr;
		char *pg_bufptr;
		ulong pg_hit_count;
	} page_cache_hdr[DISKDUMP_CACHED_PAGES];
	char	*page_cache_buf;	/* base of cached buffer pages */
	int	evict_index;		/* next page to evict */
	ulong	evictions;		/* total evictions done */
	ulong	cached_reads;
	ulong	*valid_pages;
	ulong	accesses;
	ulong	snapshot_task;
};

static struct diskdump_data diskdump_data = { 0 };
static struct diskdump_data *dd = &diskdump_data;
static int get_dump_level(void);

ulong *diskdump_flags = &diskdump_data.flags;

static int __diskdump_memory_dump(FILE *);
static void dump_vmcoreinfo(FILE *);
static void dump_note_offsets(FILE *);
static char *vmcoreinfo_read_string(const char *);
static void diskdump_get_osrelease(void);
static int valid_note_address(unsigned char *);

/* For split dumpfile */
static struct diskdump_data **dd_list = NULL;
static int num_dd = 0;
static int num_dumpfiles = 0;

int
dumpfile_is_split(void)
{
	return KDUMP_SPLIT();
}

/*
 *  When NT_PRSTATUS notes were captured only for online cpus, remap
 *  the per-cpu note pointers to their cpu numbers, then shrink the
 *  note-pointer arrays for the architectures that only use them from
 *  within this file.
 */
void
map_cpus_to_prstatus_kdump_cmprs(void)
{
	void **nt_ptr;
	int online, i, j, nrcpus;
	size_t size;

	if (pc->flags2 & QEMU_MEM_DUMP_COMPRESSED)  /* notes exist for all cpus */
		goto resize_note_pointers;

	if (!(online = get_cpus_online()) || (online == kt->cpus) ||
	    machine_type("ARM64"))
		goto resize_note_pointers;

	if (CRASHDEBUG(1))
		error(INFO,
		    "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n",
		    kt->cpus, online, dd->num_prstatus_notes);

	size = NR_CPUS * sizeof(void *);

	nt_ptr = (void **)GETBUF(size);
	BCOPY(dd->nt_prstatus_percpu, nt_ptr, size);
	BZERO(dd->nt_prstatus_percpu, size);

	/*
	 *  Re-populate the array with the notes mapping to online cpus
	 */
	nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS);

	for (i = 0, j = 0; i < nrcpus; i++) {
		if (in_cpu_map(ONLINE_MAP, i)) {
			dd->nt_prstatus_percpu[i] = nt_ptr[j++];
			dd->num_prstatus_notes =
			    MAX(dd->num_prstatus_notes, i+1);
		}
	}

	FREEBUF(nt_ptr);

resize_note_pointers:
	/*
	 *  For architectures that only utilize the note pointers
	 *  within this file, resize the arrays accordingly.
	 */
	if (machine_type("X86_64") || machine_type("X86") ||
	    machine_type("ARM64")) {
		if ((dd->nt_prstatus_percpu =
		    realloc(dd->nt_prstatus_percpu,
		    dd->num_prstatus_notes * sizeof(void *))) == NULL)
			error(FATAL,
			    "compressed kdump: cannot realloc NT_PRSTATUS note pointers\n");
		if (dd->num_qemu_notes) {
			if ((dd->nt_qemu_percpu =
			    realloc(dd->nt_qemu_percpu,
			    dd->num_qemu_notes * sizeof(void *))) == NULL)
				error(FATAL,
				    "compressed kdump: cannot realloc QEMU note pointers\n");
		} else
			free(dd->nt_qemu_percpu);
	}
}

/*
 *  Add a split dumpfile to dd_list[], first verifying that it is not
 *  a duplicate of -- and shares a common disk_dump_header with -- the
 *  dumpfiles already registered.  The list is grown on demand.
 */
static void
add_diskdump_data(char* name)
{
#define DDL_SIZE 16
	int i;
	int sz = sizeof(void *);
	struct diskdump_data *ddp;

	if (dd_list == NULL) {
		dd_list = calloc(DDL_SIZE, sz);
		num_dd = DDL_SIZE;
	} else {
		for (i = 0; i < num_dumpfiles; i++) {
			ddp = dd_list[i];
			if (same_file(ddp->filename, name))
				error(FATAL,
				    "split dumpfiles are identical:\n"
				    " %s\n %s\n",
				    ddp->filename, name);
			if (memcmp(ddp->header, dd->header,
			    sizeof(struct disk_dump_header)))
				error(FATAL,
				    "split dumpfiles derived from different vmcores:\n"
				    " %s\n %s\n",
				    ddp->filename, name);
		}
	}

	if (num_dumpfiles == num_dd) {
		/* expand list */
		struct diskdump_data **tmp;
		tmp = calloc(num_dd*2, sz);
		memcpy(tmp, dd_list, sz*num_dd);
free(dd_list); dd_list = tmp; num_dd *= 2; } dd_list[num_dumpfiles] = dd; dd->flags |= DUMPFILE_SPLIT; dd->filename = name; if (CRASHDEBUG(1)) fprintf(fp, "%s: start_pfn=%llu, end_pfn=%llu\n", name, dd->sub_header_kdump->start_pfn_64, dd->sub_header_kdump->end_pfn_64); } static void clean_diskdump_data(void) { int i; if (dd_list == NULL) return; for (i=1; ibitmap, nr >> 3, nr & 7); } static inline int page_is_dumpable(unsigned long nr) { return dd->dumpable_bitmap[nr>>3] & (1 << (nr & 7)); } static inline int dump_is_partial(const struct disk_dump_header *header) { return header->bitmap_blocks >= divideup(divideup(dd->max_mapnr, 8), dd->block_size) * 2; } static int open_dump_file(char *file) { int fd; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "diskdump / compressed kdump: unable to open dump file %s\n", file); return FALSE; } if (KDUMP_SPLIT()) dd = calloc(1, sizeof(*dd)); dd->dfd = fd; return TRUE; } void process_elf32_notes(void *note_buf, unsigned long size_note) { Elf32_Nhdr *nt; size_t index, len = 0; int num = 0; int qemu_num = 0; for (index = 0; index < size_note; index += len) { nt = note_buf + index; if (nt->n_type == NT_PRSTATUS) { dd->nt_prstatus_percpu[num] = nt; num++; } len = sizeof(Elf32_Nhdr); if (STRNEQ((char *)nt + len, "QEMU")) { dd->nt_qemu_percpu[qemu_num] = nt; qemu_num++; } if (nt->n_type == NT_XEN_KDUMP_CR3 || nt->n_type == XEN_ELFNOTE_CRASH_INFO) { void *data = (char*)(nt + 1) + roundup(nt->n_namesz, 4); process_xen_note(nt->n_type, data, nt->n_descsz); } len = roundup(len + nt->n_namesz, 4); len = roundup(len + nt->n_descsz, 4); } if (num > 0) { pc->flags2 |= ELF_NOTES; dd->num_prstatus_notes = num; } if (qemu_num > 0) { pc->flags2 |= QEMU_MEM_DUMP_COMPRESSED; dd->num_qemu_notes = qemu_num; } return; } void process_elf64_notes(void *note_buf, unsigned long size_note) { Elf64_Nhdr *nt; size_t index, len = 0; int num = 0; int qemu_num = 0; for (index = 0; index < size_note; index += len) { nt = note_buf + index; if (nt->n_type == 
NT_PRSTATUS) { dd->nt_prstatus_percpu[num] = nt; num++; } if ((nt->n_type == NT_TASKSTRUCT) && (STRNEQ((char *)nt + sizeof(Elf64_Nhdr), "SNAP"))) { pc->flags2 |= (LIVE_DUMP|SNAP); dd->snapshot_task = *((ulong *)((char *)nt + sizeof(Elf64_Nhdr) + nt->n_namesz)); } len = sizeof(Elf64_Nhdr); if (STRNEQ((char *)nt + len, "QEMU")) { dd->nt_qemu_percpu[qemu_num] = nt; qemu_num++; } if (nt->n_type == NT_XEN_KDUMP_CR3 || nt->n_type == XEN_ELFNOTE_CRASH_INFO) { void *data = (char*)(nt + 1) + roundup(nt->n_namesz, 4); process_xen_note(nt->n_type, data, nt->n_descsz); } len = roundup(len + nt->n_namesz, 4); len = roundup(len + nt->n_descsz, 4); } if (num > 0) { pc->flags2 |= ELF_NOTES; dd->num_prstatus_notes = num; } if (qemu_num > 0) { pc->flags2 |= QEMU_MEM_DUMP_COMPRESSED; dd->num_qemu_notes = qemu_num; } return; } void x86_process_elf_notes(void *note_ptr, unsigned long size_note) { if (machine_type("X86_64")) process_elf64_notes(note_ptr, size_note); else if (machine_type("X86")) process_elf32_notes(note_ptr, size_note); } #if defined(__i386__) && (defined(ARM) || defined(MIPS)) /* * The kdump_sub_header member offsets are different when the crash * binary is built natively on an ARM host vs. when built with * "make target=ARM" on an x86/x86_64 host. This is because the * off_t structure members will be aligned on an 8-byte boundary when * compiled as an ARM binary -- which will be reflected in the * kdump_sub_header in a compressed ARM kdump. 
 *
 * When crash is compiled as an x86 binary, these are the
 * structure's offsets:
 *
 *  struct kdump_sub_header {
 * [0]  unsigned long phys_base;
 * [4]  int dump_level;         / header_version 1 and later /
 * [8]  int split;              / header_version 2 and later /
 * [12] unsigned long start_pfn;       / header_version 2 and later /
 * [16] unsigned long end_pfn;         / header_version 2 and later /
 * [20] off_t offset_vmcoreinfo;       / header_version 3 and later /
 * [28] unsigned long size_vmcoreinfo; / header_version 3 and later /
 * [32] off_t offset_note;             / header_version 4 and later /
 * [40] unsigned long size_note;       / header_version 4 and later /
 * [44] off_t offset_eraseinfo;        / header_version 5 and later /
 * [52] unsigned long size_eraseinfo;  / header_version 5 and later /
 * [56] unsigned long long start_pfn_64;  / header_version 6 and later /
 * [64] unsigned long long end_pfn_64;    / header_version 6 and later /
 * [72] unsigned long long max_mapnr_64;  / header_version 6 and later /
 *  };
 *
 * But when compiled on an ARM processor, each 64-bit "off_t" would be pushed
 * up to an 8-byte boundary:
 *
 *  struct kdump_sub_header {
 * [0]  unsigned long phys_base;
 * [4]  int dump_level;         / header_version 1 and later /
 * [8]  int split;              / header_version 2 and later /
 * [12] unsigned long start_pfn;       / header_version 2 and later /
 * [16] unsigned long end_pfn;         / header_version 2 and later /
 * [24] off_t offset_vmcoreinfo;       / header_version 3 and later /
 * [32] unsigned long size_vmcoreinfo; / header_version 3 and later /
 * [40] off_t offset_note;             / header_version 4 and later /
 * [48] unsigned long size_note;       / header_version 4 and later /
 * [56] off_t offset_eraseinfo;        / header_version 5 and later /
 * [64] unsigned long size_eraseinfo;  / header_version 5 and later /
 * [72] unsigned long long start_pfn_64;  / header_version 6 and later /
 * [80] unsigned long long end_pfn_64;    / header_version 6 and later /
 * [88] unsigned long long max_mapnr_64;  / header_version 6 and later /
 *  };
 *
 */

/*
 *  Mirror of the target's kdump_sub_header with explicit pad members
 *  standing in for the alignment holes described above.
 */
struct kdump_sub_header_ARM_target {
	unsigned long phys_base;
	int dump_level;			/* header_version 1 and later */
	int split;			/* header_version 2 and later */
	unsigned long start_pfn;	/* header_version 2 and later */
	unsigned long end_pfn;		/* header_version 2 and later */
	int pad1;
	off_t offset_vmcoreinfo;	/* header_version 3 and later */
	unsigned long size_vmcoreinfo;	/* header_version 3 and later */
	int pad2;
	off_t offset_note;		/* header_version 4 and later */
	unsigned long size_note;	/* header_version 4 and later */
	int pad3;
	off_t offset_eraseinfo;		/* header_version 5 and later */
	unsigned long size_eraseinfo;	/* header_version 5 and later */
	int pad4;
	unsigned long long start_pfn_64;	/* header_version 6 and later */
	unsigned long long end_pfn_64;		/* header_version 6 and later */
	unsigned long long max_mapnr_64;	/* header_version 6 and later */
};

/*
 *  Re-read the already-loaded sub_header_kdump in place, using the
 *  target's (padded) field offsets rather than this host's layout.
 *  Only the fields present in the given header_version are adjusted.
 */
static void
arm_kdump_header_adjust(int header_version)
{
	struct kdump_sub_header *kdsh;
	struct kdump_sub_header_ARM_target *kdsh_ARM_target;

	kdsh = dd->sub_header_kdump;
	kdsh_ARM_target = (struct kdump_sub_header_ARM_target *)kdsh;

	if (header_version >= 3) {
		kdsh->offset_vmcoreinfo = kdsh_ARM_target->offset_vmcoreinfo;
		kdsh->size_vmcoreinfo = kdsh_ARM_target->size_vmcoreinfo;
	}
	if (header_version >= 4) {
		kdsh->offset_note = kdsh_ARM_target->offset_note;
		kdsh->size_note = kdsh_ARM_target->size_note;
	}
	if (header_version >= 5) {
		kdsh->offset_eraseinfo = kdsh_ARM_target->offset_eraseinfo;
		kdsh->size_eraseinfo = kdsh_ARM_target->size_eraseinfo;
	}
	if (header_version >= 6) {
		kdsh->start_pfn_64 = kdsh_ARM_target->start_pfn_64;
		kdsh->end_pfn_64 = kdsh_ARM_target->end_pfn_64;
		kdsh->max_mapnr_64 = kdsh_ARM_target->max_mapnr_64;
	} else {
		/* pre-v6: synthesize the 64-bit pfn range from 32-bit fields */
		kdsh->start_pfn_64 = kdsh_ARM_target->start_pfn;
		kdsh->end_pfn_64 = kdsh_ARM_target->end_pfn;
		kdsh->max_mapnr_64 = dd->max_mapnr;
	}
}
#endif /* __i386__ && (ARM || MIPS) */

static int
read_dump_header(char *file)
{
	struct disk_dump_header *header = NULL;
	struct disk_dump_sub_header *sub_header = NULL;
	struct kdump_sub_header
*sub_header_kdump = NULL; size_t size; off_t bitmap_len; char *bufptr; size_t len; ssize_t bytes_read; int block_size = (int)sysconf(_SC_PAGESIZE); off_t offset; const off_t failed = (off_t)-1; ulong pfn; int i, j, max_sect_len; int is_split = 0; if (block_size < 0) return FALSE; restart: if ((header = realloc(header, block_size)) == NULL) error(FATAL, "diskdump / compressed kdump: cannot malloc block_size buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, 0, header, block_size)) { error(FATAL, "diskdump / compressed kdump: cannot read header\n"); goto err; } } else { if (lseek(dd->dfd, 0, SEEK_SET) == failed) { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: cannot lseek dump header\n"); goto err; } if (read(dd->dfd, header, block_size) < block_size) { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: cannot read dump header\n"); goto err; } } /* validate dump header */ if (!memcmp(header->signature, DISK_DUMP_SIGNATURE, sizeof(header->signature))) { dd->flags |= DISKDUMP_LOCAL; } else if (!memcmp(header->signature, KDUMP_SIGNATURE, sizeof(header->signature))) { dd->flags |= KDUMP_CMPRS_LOCAL; if (header->header_version >= 1) dd->flags |= ERROR_EXCLUDED; } else { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: dump does not have panic dump header\n"); goto err; } if (CRASHDEBUG(1)) fprintf(fp, "%s: header->utsname.machine: %s\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump", header->utsname.machine); if (STRNEQ(header->utsname.machine, "i686") && machine_type_mismatch(file, "X86", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "x86_64") && machine_type_mismatch(file, "X86_64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "ia64") && machine_type_mismatch(file, "IA64", NULL, 0)) goto err; else if (STREQ(header->utsname.machine, "ppc") && machine_type_mismatch(file, "PPC", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "ppc64") && machine_type_mismatch(file, "PPC64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "arm") && machine_type_mismatch(file, "ARM", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "mips") && machine_type_mismatch(file, "MIPS", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "s390x") && machine_type_mismatch(file, "S390X", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "aarch64") && machine_type_mismatch(file, "ARM64", NULL, 0)) goto err; if (header->block_size != block_size) { block_size = header->block_size; if (CRASHDEBUG(1)) fprintf(fp, "retrying with different block/page size: %d\n", header->block_size); goto restart; } dd->block_size = header->block_size; dd->block_shift = ffs(header->block_size) - 1; if ((DISKDUMP_VALID() && (sizeof(*header) + sizeof(void *) * header->nr_cpus > block_size)) || header->nr_cpus <= 0) { error(WARNING, "%s: invalid nr_cpus value: %d\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump", header->nr_cpus); if (!machine_type("S390") && !machine_type("S390X") && !machine_type("X86") && !machine_type("X86_64")) { if (DISKDUMP_VALID()) goto err; } } /* read sub header */ offset = (off_t)block_size; if (DISKDUMP_VALID()) { if ((sub_header = malloc(block_size)) == NULL) error(FATAL, "diskdump: cannot malloc sub_header buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, sub_header, block_size)) { error(INFO, "diskdump: cannot read dump sub header\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "diskdump: cannot lseek dump sub header\n"); goto err; } if (read(dd->dfd, sub_header, block_size) < block_size) { error(INFO, "diskdump: cannot read dump sub header\n"); goto err; } } dd->sub_header = sub_header; /* the 64bit max_mapnr only exists in sub-header of compressed * kdump file, if it's not a compressed kdump file, we have to * use the old 32bit max_mapnr in dumpfile header. * max_mapnr may be truncated here. 
*/ dd->max_mapnr = header->max_mapnr; } else if (KDUMP_CMPRS_VALID()) { if ((sub_header_kdump = malloc(block_size)) == NULL) error(FATAL, "compressed kdump: cannot malloc sub_header_kdump buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, sub_header_kdump, block_size)) { error(INFO, "compressed kdump: cannot read dump sub header\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump sub header\n"); goto err; } if (read(dd->dfd, sub_header_kdump, block_size) < block_size) { error(INFO, "compressed kdump: cannot read dump sub header\n"); goto err; } } dd->sub_header_kdump = sub_header_kdump; #if defined(__i386__) && (defined(ARM) || defined(MIPS)) arm_kdump_header_adjust(header->header_version); #endif /* use 64bit max_mapnr in compressed kdump file sub-header */ if (header->header_version >= 6) dd->max_mapnr = dd->sub_header_kdump->max_mapnr_64; else { dd->sub_header_kdump->start_pfn_64 = dd->sub_header_kdump->start_pfn; dd->sub_header_kdump->end_pfn_64 = dd->sub_header_kdump->end_pfn; } } if (header->header_version < 6) dd->max_mapnr = header->max_mapnr; /* read memory bitmap */ bitmap_len = block_size * header->bitmap_blocks; dd->bitmap_len = bitmap_len; offset = (off_t)block_size * (1 + header->sub_hdr_size); if ((dd->bitmap = malloc(bitmap_len)) == NULL) error(FATAL, "%s: cannot malloc bitmap buffer\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); dd->dumpable_bitmap = calloc(bitmap_len, 1); if (CRASHDEBUG(8)) fprintf(fp, "%s: memory bitmap offset: %llx\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", (ulonglong)offset); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, dd->bitmap, bitmap_len)) { error(INFO, "%s: cannot read memory bitmap\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "%s: cannot lseek memory bitmap\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump"); goto err; } bufptr = dd->bitmap; len = bitmap_len; while (len) { bytes_read = read(dd->dfd, bufptr, len); if (bytes_read <= 0) { error(INFO, "%s: cannot read memory bitmap\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); goto err; } len -= bytes_read; bufptr += bytes_read; } } if (dump_is_partial(header)) memcpy(dd->dumpable_bitmap, dd->bitmap + bitmap_len/2, bitmap_len/2); else memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len); dd->data_offset = (1 + header->sub_hdr_size + header->bitmap_blocks) * header->block_size; dd->header = header; if (machine_type("ARM")) dd->machine_type = EM_ARM; else if (machine_type("MIPS")) dd->machine_type = EM_MIPS; else if (machine_type("X86")) dd->machine_type = EM_386; else if (machine_type("X86_64")) dd->machine_type = EM_X86_64; else if (machine_type("IA64")) dd->machine_type = EM_IA_64; else if (machine_type("PPC")) dd->machine_type = EM_PPC; else if (machine_type("PPC64")) dd->machine_type = EM_PPC64; else if (machine_type("S390X")) dd->machine_type = EM_S390; else if (machine_type("ARM64")) dd->machine_type = EM_AARCH64; else { error(INFO, "%s: unsupported machine type: %s\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump", MACHINE_TYPE); goto err; } /* process elf notes data */ if (KDUMP_CMPRS_VALID() && !(dd->flags & NO_ELF_NOTES) && (dd->header->header_version >= 4) && (sub_header_kdump->offset_note) && (sub_header_kdump->size_note) && (machdep->process_elf_notes)) { size = sub_header_kdump->size_note; offset = sub_header_kdump->offset_note; if ((dd->notes_buf = malloc(size)) == NULL) error(FATAL, "compressed kdump: cannot malloc notes" " buffer\n"); if ((dd->nt_prstatus_percpu = malloc(NR_CPUS * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot malloc pointer" " to NT_PRSTATUS notes\n"); if ((dd->nt_qemu_percpu = malloc(NR_CPUS * sizeof(void *))) == NULL) error(FATAL, "qemu mem dump compressed: cannot malloc pointer" " to QEMU notes\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, dd->notes_buf, size)) { error(INFO, "compressed kdump: cannot read notes data" "\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek notes data\n"); goto err; } if (read(dd->dfd, dd->notes_buf, size) < size) { error(INFO, "compressed kdump: cannot read notes data" "\n"); goto err; } } machdep->process_elf_notes(dd->notes_buf, size); } /* Check if dump file contains erasesinfo data */ if (KDUMP_CMPRS_VALID() && (dd->header->header_version >= 5) && (sub_header_kdump->offset_eraseinfo) && (sub_header_kdump->size_eraseinfo)) pc->flags2 |= ERASEINFO_DATA; if (KDUMP_CMPRS_VALID() && (dd->header->header_version >= 3) && dd->sub_header_kdump->offset_vmcoreinfo && dd->sub_header_kdump->size_vmcoreinfo) pc->flags2 |= VMCOREINFO; if (KDUMP_CMPRS_VALID() && (dd->header->status & DUMP_DH_COMPRESSED_INCOMPLETE)) pc->flags2 |= INCOMPLETE_DUMP; if (KDUMP_CMPRS_VALID() && (dd->header->status & DUMP_DH_EXCLUDED_VMEMMAP)) pc->flags2 |= EXCLUDED_VMEMMAP; /* For split dumpfile */ if (KDUMP_CMPRS_VALID()) { is_split = ((dd->header->header_version >= 2) && (sub_header_kdump->split)); if 
((is_split && (num_dumpfiles != 0) && (dd_list == NULL))|| (!is_split && (num_dumpfiles != 0))) { clean_diskdump_data(); goto err; } if (is_split) add_diskdump_data(file); num_dumpfiles++; } if (!is_split) { max_sect_len = divideup(dd->max_mapnr, BITMAP_SECT_LEN); pfn = 0; dd->filename = file; } else { unsigned long long start = sub_header_kdump->start_pfn_64; unsigned long long end = sub_header_kdump->end_pfn_64; max_sect_len = divideup(end - start + 1, BITMAP_SECT_LEN); pfn = start; } dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1); for (i = 1; i < max_sect_len + 1; i++) { dd->valid_pages[i] = dd->valid_pages[i - 1]; for (j = 0; j < BITMAP_SECT_LEN; j++, pfn++) if (page_is_dumpable(pfn)) dd->valid_pages[i]++; } return TRUE; err: free(header); if (sub_header) free(sub_header); if (sub_header_kdump) free(sub_header_kdump); if (dd->bitmap) free(dd->bitmap); if (dd->dumpable_bitmap) free(dd->dumpable_bitmap); if (dd->notes_buf) free(dd->notes_buf); if (dd->nt_prstatus_percpu) free(dd->nt_prstatus_percpu); if (dd->nt_qemu_percpu) free(dd->nt_qemu_percpu); dd->flags &= ~(DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL); pc->flags2 &= ~ELF_NOTES; return FALSE; } static ulong pfn_to_pos(ulong pfn) { ulong desc_pos, j, valid; ulong p1, p2; if (KDUMP_SPLIT()) { p1 = pfn - dd->sub_header_kdump->start_pfn_64; p2 = round(p1, BITMAP_SECT_LEN) + dd->sub_header_kdump->start_pfn_64; } else { p1 = pfn; p2 = round(pfn, BITMAP_SECT_LEN); } valid = dd->valid_pages[p1 / BITMAP_SECT_LEN]; for (j = p2, desc_pos = valid; j <= pfn; j++) if (page_is_dumpable(j)) desc_pos++; return desc_pos; } /* * Determine whether a file is a diskdump creation, and if TRUE, * initialize the diskdump_data structure based upon the contents * of the diskdump header data. 
*/ int is_diskdump(char *file) { int sz, i; if (!open_dump_file(file) || !read_dump_header(file)) return FALSE; sz = dd->block_size * (DISKDUMP_CACHED_PAGES); if ((dd->page_cache_buf = malloc(sz)) == NULL) error(FATAL, "%s: cannot malloc compressed page_cache_buf\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) dd->page_cache_hdr[i].pg_bufptr = &dd->page_cache_buf[i * dd->block_size]; if ((dd->compressed_page = (char *)malloc(dd->block_size)) == NULL) error(FATAL, "%s: cannot malloc compressed page space\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); if (CRASHDEBUG(1)) __diskdump_memory_dump(fp); if (pc->flags2 & GET_OSRELEASE) diskdump_get_osrelease(); #ifdef LZO if (lzo_init() == LZO_E_OK) dd->flags |= LZO_SUPPORTED; #endif #ifdef SNAPPY dd->flags |= SNAPPY_SUPPORTED; #endif pc->read_vmcoreinfo = vmcoreinfo_read_string; if ((pc->flags2 & GET_LOG) && KDUMP_CMPRS_VALID()) { pc->dfd = dd->dfd; pc->readmem = read_diskdump; pc->flags |= DISKDUMP; get_log_from_vmcoreinfo(file); } return TRUE; } /* * Perform any post-dumpfile determination stuff here. * At a minimum */ int diskdump_init(char *unused, FILE *fptr) { if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) return FALSE; dd->ofp = fptr; return TRUE; } /* * Get the relocational offset from the sub header of kdump. */ int diskdump_phys_base(unsigned long *phys_base) { if (KDUMP_CMPRS_VALID()) { *phys_base = dd->sub_header_kdump->phys_base; return TRUE; } return FALSE; } /* * Check whether paddr is already cached. */ static int page_is_cached(physaddr_t paddr) { int i; struct page_cache_hdr *pgc; dd->accesses++; for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { pgc = &dd->page_cache_hdr[i]; if (!DISKDUMP_VALID_PAGE(pgc->pg_flags)) continue; if (pgc->pg_addr == paddr) { pgc->pg_hit_count++; dd->curbufptr = pgc->pg_bufptr; dd->cached_reads++; return TRUE; } } return FALSE; } /* * Translate physical address in paddr to PFN number. 
This means normally that
 * we just shift paddr by some constant.  Some architectures need special
 * handling for this, however.
 */
static ulong
paddr_to_pfn(physaddr_t paddr)
{
#ifdef ARM
	/*
	 * In ARM, PFN 0 means first page in kernel direct-mapped view.
	 * This is also first page in mem_map as well.
	 */
	return (paddr - machdep->machspec->phys_base) >> dd->block_shift;
#else
	return paddr >> dd->block_shift;
#endif
}

/*
 * Cache the page's data.
 *
 * If an empty page cache location is available, take it.  Otherwise, evict
 * the entry indexed by evict_index, and then bump evict index.  The hit_count
 * is only gathered for dump_diskdump_environment().
 *
 * If the page is compressed, uncompress it into the selected page cache entry.
 * If the page is raw, just copy it into the selected page cache entry.
 * If all works OK, update diskdump->curbufptr to point to the page's
 * uncompressed data.
 */
static int
cache_page(physaddr_t paddr)
{
	int i, ret;
	int found;
	ulong pfn;
	ulong desc_pos;
	off_t seek_offset;
	page_desc_t pd;
	const int block_size = dd->block_size;
	const off_t failed = (off_t)-1;
	ulong retlen;

	/* Prefer a free slot; otherwise round-robin evict. */
	for (i = found = 0; i < DISKDUMP_CACHED_PAGES; i++) {
		if (DISKDUMP_VALID_PAGE(dd->page_cache_hdr[i].pg_flags))
			continue;
		found = TRUE;
		break;
	}

	if (!found) {
		i = dd->evict_index;
		dd->page_cache_hdr[i].pg_hit_count = 0;
		dd->evict_index =
			(dd->evict_index+1) % DISKDUMP_CACHED_PAGES;
		dd->evictions++;
	}

	dd->page_cache_hdr[i].pg_flags = 0;
	dd->page_cache_hdr[i].pg_addr = paddr;
	dd->page_cache_hdr[i].pg_hit_count++;

	/* find page descriptor */
	pfn = paddr_to_pfn(paddr);
	desc_pos = pfn_to_pos(pfn);
	/* desc_pos is 1-based: descriptor N lives at slot N-1. */
	seek_offset = dd->data_offset
			+ (off_t)(desc_pos - 1)*sizeof(page_desc_t);

	/* read page descriptor */
	if (FLAT_FORMAT()) {
		if (!read_flattened_format(dd->dfd, seek_offset, &pd, sizeof(pd)))
			return READ_ERROR;
	} else {
		if (lseek(dd->dfd, seek_offset, SEEK_SET) == failed)
			return SEEK_ERROR;
		if (read(dd->dfd, &pd, sizeof(pd)) != sizeof(pd))
			return READ_ERROR;
	}

	/* sanity check */
	if (pd.size > block_size)
		return READ_ERROR;

	/* read page data */
	if (FLAT_FORMAT()) {
		if (!read_flattened_format(dd->dfd, pd.offset, dd->compressed_page, pd.size))
			return READ_ERROR;
	} else if (is_incomplete_dump() && (0 == pd.offset)) {
		/*
		 * If the incomplete flag has been set in the header,
		 * first check whether zero_excluded has been set.
		 */
		if (*diskdump_flags & ZERO_EXCLUDED) {
			if (CRASHDEBUG(8))
				fprintf(fp,
				    "read_diskdump/cache_page: zero-fill: "
				    "paddr/pfn: %llx/%lx\n",
					(ulonglong)paddr, pfn);
			memset(dd->compressed_page, 0, dd->block_size);
		} else
			return READ_ERROR;
	} else {
		if (lseek(dd->dfd, pd.offset, SEEK_SET) == failed)
			return SEEK_ERROR;
		if (read(dd->dfd, dd->compressed_page, pd.size) != pd.size)
			return READ_ERROR;
	}

	/* Decompress per the descriptor's flags; raw pages are memcpy'd. */
	if (pd.flags & DUMP_DH_COMPRESSED_ZLIB) {
		retlen = block_size;
		ret = uncompress((unsigned char *)dd->page_cache_hdr[i].pg_bufptr,
		                 &retlen,
		                 (unsigned char *)dd->compressed_page,
		                 pd.size);
		if ((ret != Z_OK) || (retlen != block_size)) {
			error(INFO, "%s: uncompress failed: %d\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
				ret);
			return READ_ERROR;
		}
	} else if (pd.flags & DUMP_DH_COMPRESSED_LZO) {

		if (!(dd->flags & LZO_SUPPORTED)) {
			error(INFO,
			    "%s: uncompress failed: no lzo compression support\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
			return READ_ERROR;
		}

#ifdef LZO
		retlen = block_size;
		ret = lzo1x_decompress_safe((unsigned char *)dd->compressed_page,
		                            pd.size,
		                            (unsigned char *)dd->page_cache_hdr[i].pg_bufptr,
		                            &retlen,
		                            LZO1X_MEM_DECOMPRESS);
		if ((ret != LZO_E_OK) || (retlen != block_size)) {
			error(INFO, "%s: uncompress failed: %d\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
				ret);
			return READ_ERROR;
		}
#endif
	} else if (pd.flags & DUMP_DH_COMPRESSED_SNAPPY) {

		if (!(dd->flags & SNAPPY_SUPPORTED)) {
			error(INFO,
			    "%s: uncompress failed: no snappy compression support\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump");
			return READ_ERROR;
		}

#ifdef SNAPPY
		ret = snappy_uncompressed_length((char *)dd->compressed_page,
						 pd.size, (size_t *)&retlen);
		if (ret != SNAPPY_OK) {
			error(INFO, "%s: uncompress failed: %d\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
				ret);
			return READ_ERROR;
		}

		ret = snappy_uncompress((char *)dd->compressed_page, pd.size,
					(char *)dd->page_cache_hdr[i].pg_bufptr,
					(size_t *)&retlen);
		if ((ret != SNAPPY_OK) || (retlen != block_size)) {
			error(INFO, "%s: uncompress failed: %d\n",
				DISKDUMP_VALID() ? "diskdump" : "compressed kdump",
				ret);
			return READ_ERROR;
		}
#endif
	} else
		memcpy(dd->page_cache_hdr[i].pg_bufptr,
		       dd->compressed_page, block_size);

	dd->page_cache_hdr[i].pg_flags |= PAGE_VALID;
	dd->curbufptr = dd->page_cache_hdr[i].pg_bufptr;

	return TRUE;
}

/*
 * Read from a diskdump-created dumpfile.
 */
int
read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	int ret;
	physaddr_t curpaddr;
	ulong pfn, page_offset;
	physaddr_t paddr_in = paddr;

	if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) {
		/* Translate the guest pseudo-physical address first. */
		if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: xen_kdump_p2m(%llx): "
				    "P2M_FAILURE\n", (ulonglong)paddr_in);
			return READ_ERROR;
		}
		if (CRASHDEBUG(8))
			fprintf(fp, "read_diskdump: xen_kdump_p2m(%llx): %llx\n",
				(ulonglong)paddr_in, (ulonglong)paddr);
	}

	pfn = paddr_to_pfn(paddr);

	if (KDUMP_SPLIT()) {
		/* Find proper dd */
		int i;
		unsigned long long start_pfn;
		unsigned long long end_pfn;

		for (i=0; i<num_dumpfiles; i++) {
			start_pfn = dd_list[i]->sub_header_kdump->start_pfn_64;
			end_pfn = dd_list[i]->sub_header_kdump->end_pfn_64;
			if ((pfn >= start_pfn) && (pfn < end_pfn)) {
				dd = dd_list[i];
				break;
			}
		}

		if (i == num_dumpfiles) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: SEEK_ERROR: "
				    "paddr/pfn %llx/%lx beyond last dumpfile\n",
					(ulonglong)paddr, pfn);
			return SEEK_ERROR;
		}
	}

	/* Page-align the address; cnt never crosses a page boundary here. */
	curpaddr = paddr & ~((physaddr_t)(dd->block_size-1));
	page_offset = paddr & ((physaddr_t)(dd->block_size-1));

	if ((pfn >= dd->max_mapnr) || !page_is_ram(pfn)) {
		if (CRASHDEBUG(8)) {
			fprintf(fp, "read_diskdump: SEEK_ERROR: "
			    "paddr/pfn: %llx/%lx ",
				(ulonglong)paddr, pfn);
			if (pfn >= dd->max_mapnr)
				fprintf(fp, "max_mapnr: %llx\n",
					dd->max_mapnr);
			else
				fprintf(fp, "!page_is_ram\n");
		}
		return SEEK_ERROR;
	}

	if (!page_is_dumpable(pfn)) {
		/* ERROR_EXCLUDED alone (no ZERO_EXCLUDED) => hard error;
		 * otherwise excluded pages read back as zeroes. */
		if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) ==
		    ERROR_EXCLUDED) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: PAGE_EXCLUDED: "
				    "paddr/pfn: %llx/%lx\n",
					(ulonglong)paddr, pfn);
			return PAGE_EXCLUDED;
		}
		if (CRASHDEBUG(8))
			fprintf(fp, "read_diskdump: zero-fill: "
			    "paddr/pfn: %llx/%lx\n",
				(ulonglong)paddr, pfn);
		memset(bufptr, 0, cnt);
		return cnt;
	}

	if (!page_is_cached(curpaddr)) {
		if (CRASHDEBUG(8))
			fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx"
			    " -> cache physical page: %llx\n",
				(ulonglong)paddr, pfn, (ulonglong)curpaddr);

		if ((ret = cache_page(curpaddr)) < 0) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_diskdump: "
				    "%s: cannot cache page: %llx\n",
					ret == SEEK_ERROR ?
					"SEEK_ERROR" : "READ_ERROR",
					(ulonglong)curpaddr);
			return ret;
		}
	} else if (CRASHDEBUG(8))
		fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx"
		    " -> physical page is cached: %llx\n",
			(ulonglong)paddr, pfn, (ulonglong)curpaddr);

	memcpy(bufptr, dd->curbufptr + page_offset, cnt);

	return cnt;
}

/*
 * Write to a diskdump-created dumpfile.
 */
int
write_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	return 0;
}

/*
 * Determine the panic task: the old diskdump header records it directly;
 * for compressed kdump, use the kernel's "crashing_cpu" symbol to index
 * into the active task set.
 */
ulong
get_diskdump_panic_task(void)
{
	int i;

	if ((!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID())
	    || !get_active_set())
		return NO_TASK;

	if (pc->flags2 & SNAP)
		return (task_exists(dd->snapshot_task) ?
			dd->snapshot_task : NO_TASK);

	if (DISKDUMP_VALID())
		return (ulong)dd->header->tasks[dd->header->current_cpu];

	if (KDUMP_CMPRS_VALID()) {
		if (kernel_symbol_exists("crashing_cpu") &&
		    cpu_map_addr("online")) {
			get_symbol_data("crashing_cpu", sizeof(int), &i);
			if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) {
				if (CRASHDEBUG(1))
					error(INFO,
					    "get_diskdump_panic_task: "
					    "active_set[%d]: %lx\n",
						i, tt->active_set[i]);
				return (tt->active_set[i]);
			}
		}
	}

	return NO_TASK;
}

extern void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *);
extern void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *);

/*
 * For a panic/active task in a compressed kdump, point bt->machdep at the
 * pr_reg register area inside the task's NT_PRSTATUS ELF note before asking
 * the architecture for the stack frame.
 */
static void
get_diskdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp)
{
	Elf32_Nhdr *note;
	int len;

	if (KDUMP_CMPRS_VALID() &&
		(bt->task == tt->panic_task ||
		(is_task_active(bt->task) && dd->num_prstatus_notes > 1))) {
		note = (Elf32_Nhdr*)
			dd->nt_prstatus_percpu[bt->tc->processor];
		if (!note)
			error(FATAL,
			    "cannot determine NT_PRSTATUS ELF note "
			    "for %s task: %lx\n",
				(bt->task == tt->panic_task) ?
				"panic" : "active", bt->task);
		len = sizeof(Elf32_Nhdr);
		len = roundup(len + note->n_namesz, 4);
		bt->machdep = (void *)((char *)note + len +
			MEMBER_OFFSET("elf_prstatus", "pr_reg"));
	}

	machdep->get_stack_frame(bt, eip, esp);
}

static void
get_diskdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp)
{
	/* PPC notes must be relocated before the generic 32-bit lookup. */
	if (KDUMP_CMPRS_VALID())
		ppc_relocate_nt_prstatus_percpu(dd->nt_prstatus_percpu,
			&dd->num_prstatus_notes);

	get_diskdump_regs_32(bt, eip, esp);
}

static void
get_diskdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp)
{
	/* Old diskdump format: registers live in the sub-header. */
	if ((bt->task == tt->panic_task) && DISKDUMP_VALID())
		bt->machdep = &dd->sub_header->elf_regs;

	machdep->get_stack_frame(bt, eip, esp);
}

static void
get_diskdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp)
{
	machdep->get_stack_frame(bt, eip, esp);
}

static void
get_diskdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp)
{
	machdep->get_stack_frame(bt, eip, esp);
}

/*
 * Send the request to the proper architecture handler.
*/ void get_diskdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { switch (dd->machine_type) { case EM_ARM: get_diskdump_regs_arm(bt, eip, esp); break; case EM_MIPS: return get_diskdump_regs_32(bt, eip, esp); break; case EM_386: return get_netdump_regs_x86(bt, eip, esp); break; case EM_IA_64: /* For normal backtraces, this information will be obtained * frome the switch_stack structure, which is pointed to by * the thread.ksp field of the task_struct. But it's still * needed by the "bt -t" option. */ machdep->get_stack_frame(bt, eip, esp); break; case EM_PPC: return get_diskdump_regs_ppc(bt, eip, esp); break; case EM_PPC64: return get_diskdump_regs_ppc64(bt, eip, esp); break; case EM_X86_64: return get_netdump_regs_x86_64(bt, eip, esp); break; case EM_S390: return machdep->get_stack_frame(bt, eip, esp); break; case EM_AARCH64: get_diskdump_regs_arm64(bt, eip, esp); break; default: error(FATAL, "%s: unsupported machine type: %s\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", MACHINE_TYPE); } } /* * Return the processor page size. */ uint diskdump_page_size(void) { if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) return 0; return dd->header->block_size; } /* * diskdump_free_memory(), and diskdump_memory_used() * are debug only, and probably unnecessary to implement. 
*/ int diskdump_free_memory(void) { return 0; } int diskdump_memory_used(void) { return 0; } static void dump_vmcoreinfo(FILE *fp) { char *buf = NULL; unsigned long i = 0; unsigned long size_vmcoreinfo = dd->sub_header_kdump->size_vmcoreinfo; off_t offset = dd->sub_header_kdump->offset_vmcoreinfo; const off_t failed = (off_t)-1; if ((buf = malloc(size_vmcoreinfo)) == NULL) { error(FATAL, "compressed kdump: cannot malloc vmcoreinfo" " buffer\n"); } if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, buf, size_vmcoreinfo)) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump vmcoreinfo\n"); goto err; } if (read(dd->dfd, buf, size_vmcoreinfo) < size_vmcoreinfo) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } fprintf(fp, " "); for (i = 0; i < size_vmcoreinfo; i++) { fprintf(fp, "%c", buf[i]); if ((buf[i] == '\n') && ((i+1) != size_vmcoreinfo)) fprintf(fp, " "); } if (buf[i-1] != '\n') fprintf(fp, "\n"); err: if (buf) free(buf); return; } static void dump_eraseinfo(FILE *fp) { char *buf = NULL; unsigned long i = 0; unsigned long size_eraseinfo = dd->sub_header_kdump->size_eraseinfo; off_t offset = dd->sub_header_kdump->offset_eraseinfo; const off_t failed = (off_t)-1; if ((buf = malloc(size_eraseinfo)) == NULL) { error(FATAL, "compressed kdump: cannot malloc eraseinfo" " buffer\n"); } if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, buf, size_eraseinfo)) { error(INFO, "compressed kdump: cannot read eraseinfo data\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump eraseinfo\n"); goto err; } if (read(dd->dfd, buf, size_eraseinfo) < size_eraseinfo) { error(INFO, "compressed kdump: cannot read eraseinfo data\n"); goto err; } } fprintf(fp, " "); for (i = 0; i < size_eraseinfo; i++) { fprintf(fp, "%c", buf[i]); 
if (buf[i] == '\n') fprintf(fp, " "); } if (buf[i - 1] != '\n') fprintf(fp, "\n"); err: if (buf) free(buf); return; } static void dump_note_offsets(FILE *fp) { struct kdump_sub_header *sub_header_kdump = dd->sub_header_kdump; size_t size; off_t offset; Elf32_Nhdr *note32 = NULL; Elf64_Nhdr *note64 = NULL; size_t tot, len = 0; int qemu, cnt; if (KDUMP_CMPRS_VALID() && !(dd->flags & NO_ELF_NOTES) && (dd->header->header_version >= 4) && (sub_header_kdump->offset_note) && (sub_header_kdump->size_note) && (machdep->process_elf_notes)) { size = sub_header_kdump->size_note; offset = sub_header_kdump->offset_note; fprintf(fp, " NOTE offsets: "); for (tot = cnt = 0; tot < size; tot += len) { qemu = FALSE; if (machine_type("X86_64") || machine_type("S390X") || machine_type("ARM64") || machine_type("PPC64")) { note64 = (void *)dd->notes_buf + tot; len = sizeof(Elf64_Nhdr); if (STRNEQ((char *)note64 + len, "QEMU")) qemu = TRUE; len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (note64->n_type == NT_PRSTATUS) { fprintf(fp, "%s%lx (NT_PRSTATUS)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } if (qemu) { fprintf(fp, "%s%lx (QEMU)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } } else if (machine_type("X86") || machine_type("PPC")) { note32 = (void *)dd->notes_buf + tot; len = sizeof(Elf32_Nhdr); if (STRNEQ((char *)note32 + len, "QEMU")) qemu = TRUE; len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); if (note32->n_type == NT_PRSTATUS) { fprintf(fp, "%s%lx (NT_PRSTATUS)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } if (qemu) { fprintf(fp, "%s%lx (QEMU)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } } } if (!cnt) fprintf(fp, "\n"); } } /* * This function is dump-type independent, and could be used * to dump the diskdump_data structure contents and perhaps * the diskdump header data. 
*/
/*
 * Dump the entire diskdump_data state of the current dumpfile for debug
 * purposes: flags, ELF machine type, the disk_dump_header, the diskdump
 * and compressed-kdump sub-headers (fields gated by header_version), and
 * the page-cache statistics.  Always returns 0.
 */
int __diskdump_memory_dump(FILE *fp)
{
	int i, others, dump_level;
	struct disk_dump_header *dh;
	struct disk_dump_sub_header *dsh;
	struct kdump_sub_header *kdsh;
	ulong *tasks;

	if (FLAT_FORMAT())
		dump_flat_header(fp);

	fprintf(fp, "diskdump_data: \n");
	fprintf(fp, " filename: %s\n", dd->filename);
	fprintf(fp, " flags: %lx (", dd->flags);
	others = 0;	/* tracks whether a "|" separator is needed */
	if (dd->flags & DISKDUMP_LOCAL)
		fprintf(fp, "%sDISKDUMP_LOCAL", others++ ? "|" : "");
	if (dd->flags & KDUMP_CMPRS_LOCAL)
		fprintf(fp, "%sKDUMP_CMPRS_LOCAL", others++ ? "|" : "");
	if (dd->flags & ERROR_EXCLUDED)
		fprintf(fp, "%sERROR_EXCLUDED", others++ ? "|" : "");
	if (dd->flags & ZERO_EXCLUDED)
		fprintf(fp, "%sZERO_EXCLUDED", others++ ? "|" : "");
	if (dd->flags & NO_ELF_NOTES)
		fprintf(fp, "%sNO_ELF_NOTES", others++ ? "|" : "");
	if (dd->flags & LZO_SUPPORTED)
		fprintf(fp, "%sLZO_SUPPORTED", others++ ? "|" : "");
	if (dd->flags & SNAPPY_SUPPORTED)
		fprintf(fp, "%sSNAPPY_SUPPORTED", others++ ? "|" : "");
	fprintf(fp, ") %s\n", FLAT_FORMAT() ? "[FLAT]" : "");
	fprintf(fp, " dfd: %d\n", dd->dfd);
	fprintf(fp, " ofp: %lx\n", (ulong)dd->ofp);
	fprintf(fp, " machine_type: %d ", dd->machine_type);
	switch (dd->machine_type)
	{
	case EM_ARM:
		fprintf(fp, "(EM_ARM)\n");
		break;
	case EM_MIPS:
		fprintf(fp, "(EM_MIPS)\n");
		break;
	case EM_386:
		fprintf(fp, "(EM_386)\n");
		break;
	case EM_X86_64:
		fprintf(fp, "(EM_X86_64)\n");
		break;
	case EM_IA_64:
		fprintf(fp, "(EM_IA_64)\n");
		break;
	case EM_PPC:
		fprintf(fp, "(EM_PPC)\n");
		break;
	case EM_PPC64:
		fprintf(fp, "(EM_PPC64)\n");
		break;
	case EM_S390:
		fprintf(fp, "(EM_S390)\n");
		break;
	case EM_AARCH64:
		fprintf(fp, "(EM_AARCH64)\n");
		break;
	default:
		fprintf(fp, "(unknown)\n");
		break;
	}

	fprintf(fp, "\n header: %lx\n", (ulong)dd->header);
	dh = dd->header;
	/* signature is not guaranteed NUL-terminated: print char-by-char */
	fprintf(fp, " signature: \"");
	for (i = 0; i < SIG_LEN; i++)
		if (dh->signature[i])
			fprintf(fp, "%c", dh->signature[i]);
	fprintf(fp, "\"\n");
	fprintf(fp, " header_version: %d\n", dh->header_version);
	fprintf(fp, " utsname:\n");
	fprintf(fp, " sysname: %s\n", dh->utsname.sysname);
	fprintf(fp, " nodename: %s\n", dh->utsname.nodename);
	fprintf(fp, " release: %s\n", dh->utsname.release);
	fprintf(fp, " version: %s\n", dh->utsname.version);
	fprintf(fp, " machine: %s\n", dh->utsname.machine);
	fprintf(fp, " domainname: %s\n", dh->utsname.domainname);
	fprintf(fp, " timestamp:\n");
	fprintf(fp, " tv_sec: %lx\n", dh->timestamp.tv_sec);
	fprintf(fp, " tv_usec: %lx\n", dh->timestamp.tv_usec);
	fprintf(fp, " status: %x (", dh->status);
	/* status encoding differs between plain diskdump and compressed kdump */
	switch (dd->flags & (DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL))
	{
	case DISKDUMP_LOCAL:
		if (dh->status == DUMP_HEADER_COMPLETED)
			fprintf(fp, "DUMP_HEADER_COMPLETED");
		else if (dh->status == DUMP_HEADER_INCOMPLETED)
			fprintf(fp, "DUMP_HEADER_INCOMPLETED");
		else if (dh->status == DUMP_HEADER_COMPRESSED)
			fprintf(fp, "DUMP_HEADER_COMPRESSED");
		break;
	case KDUMP_CMPRS_LOCAL:
		if (dh->status & DUMP_DH_COMPRESSED_ZLIB)
			fprintf(fp, "DUMP_DH_COMPRESSED_ZLIB");
		if (dh->status & DUMP_DH_COMPRESSED_LZO)
			fprintf(fp, "DUMP_DH_COMPRESSED_LZO");
		if (dh->status & DUMP_DH_COMPRESSED_SNAPPY)
			fprintf(fp, "DUMP_DH_COMPRESSED_SNAPPY");
		if (dh->status & DUMP_DH_COMPRESSED_INCOMPLETE)
			fprintf(fp, "DUMP_DH_COMPRESSED_INCOMPLETE");
		if (dh->status & DUMP_DH_EXCLUDED_VMEMMAP)
			fprintf(fp, "DUMP_DH_EXCLUDED_VMEMMAP");
		break;
	}
	fprintf(fp, ")\n");
	fprintf(fp, " block_size: %d\n", dh->block_size);
	fprintf(fp, " sub_hdr_size: %d\n", dh->sub_hdr_size);
	fprintf(fp, " bitmap_blocks: %u\n", dh->bitmap_blocks);
	fprintf(fp, " max_mapnr: %u\n", dh->max_mapnr);
	fprintf(fp, " total_ram_blocks: %u\n", dh->total_ram_blocks);
	fprintf(fp, " device_blocks: %u\n", dh->device_blocks);
	fprintf(fp, " written_blocks: %u\n", dh->written_blocks);
	fprintf(fp, " current_cpu: %u\n", dh->current_cpu);
	fprintf(fp, " nr_cpus: %d\n", dh->nr_cpus);
	/* one task pointer per cpu follows the fixed header fields */
	tasks = (ulong *)&dh->tasks[0];
	fprintf(fp, " tasks[nr_cpus]: %lx\n", *tasks);
	for (tasks++, i = 1; i < dh->nr_cpus; i++) {
		fprintf(fp, " %lx\n", *tasks);
		tasks++;
	}
	fprintf(fp, "\n");

	fprintf(fp, " sub_header: %lx ", (ulong)dd->sub_header);
	if ((dsh = dd->sub_header)) {
		fprintf(fp, "\n elf_regs: %lx\n", (ulong)&dsh->elf_regs);
		fprintf(fp, " dump_level: ");
		/* the dump_level kernel symbol is only readable at runtime */
		if ((pc->flags & RUNTIME) &&
		    ((dump_level = get_dump_level()) >= 0)) {
			fprintf(fp, "%d (0x%x) %s", dump_level, dump_level,
				dump_level ? "(" : "");

/* dump_level bits used by the original disk_dump facility */
#define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/
#define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */
#define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */
#define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */
#define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */

			others = 0;
			if (dump_level & DUMP_EXCLUDE_CACHE)
				fprintf(fp, "%sDUMP_EXCLUDE_CACHE",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_CLEAN)
				fprintf(fp, "%sDUMP_EXCLUDE_CLEAN",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_FREE)
				fprintf(fp, "%sDUMP_EXCLUDE_FREE",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_ANON)
				fprintf(fp, "%sDUMP_EXCLUDE_ANON",
					others++ ? "|" : "");
			if (dump_level & DUMP_SAVE_PRIVATE)
				fprintf(fp, "%sDUMP_SAVE_PRIVATE",
					others++ ? "|" : "");
			fprintf(fp, "%s\n\n", dump_level ? ")" : "");
		} else
			fprintf(fp, "%s\n\n", pc->flags & RUNTIME ?
				"(unknown)" : "(undetermined)");
	} else
		fprintf(fp, "(n/a)\n\n");

	fprintf(fp, " sub_header_kdump: %lx ", (ulong)dd->sub_header_kdump);
	if ((kdsh = dd->sub_header_kdump)) {
		fprintf(fp, "\n phys_base: %lx\n", (ulong)kdsh->phys_base);
		fprintf(fp, " dump_level: ");
		if ((dump_level = get_dump_level()) >= 0) {
			fprintf(fp, "%d (0x%x) %s", dump_level, dump_level,
				dump_level ? "(" : "");

/* dump_level bits used by makedumpfile (compressed kdump) */
#define DL_EXCLUDE_ZERO (0x001) /* Exclude Pages filled with Zeros */
#define DL_EXCLUDE_CACHE (0x002) /* Exclude Cache Pages without Private Pages */
#define DL_EXCLUDE_CACHE_PRI (0x004) /* Exclude Cache Pages with Private Pages */
#define DL_EXCLUDE_USER_DATA (0x008) /* Exclude UserProcessData Pages */
#define DL_EXCLUDE_FREE (0x010) /* Exclude Free Pages */

			others = 0;
			if (dump_level & DL_EXCLUDE_ZERO)
				fprintf(fp, "%sDUMP_EXCLUDE_ZERO",
					others++ ? "|" : "");
			if (dump_level & DL_EXCLUDE_CACHE)
				fprintf(fp, "%sDUMP_EXCLUDE_CACHE",
					others++ ? "|" : "");
			if (dump_level & DL_EXCLUDE_CACHE_PRI)
				fprintf(fp, "%sDUMP_EXCLUDE_CACHE_PRI",
					others++ ? "|" : "");
			if (dump_level & DL_EXCLUDE_USER_DATA)
				fprintf(fp, "%sDUMP_EXCLUDE_USER_DATA",
					others++ ? "|" : "");
			if (dump_level & DL_EXCLUDE_FREE)
				fprintf(fp, "%sDUMP_EXCLUDE_FREE",
					others++ ? "|" : "");
			others = 0;	/* NOTE(review): dead store, harmless */
			fprintf(fp, "%s\n", dump_level ? ")" : "");
		} else
			fprintf(fp, "(unknown)\n");

		/* the fields below only exist from the given header_version on */
		if (dh->header_version >= 2) {
			fprintf(fp, " split: %d\n", kdsh->split);
			fprintf(fp, " start_pfn: ");
			if (KDUMP_SPLIT())
				fprintf(fp, "%ld (0x%lx)\n",
					kdsh->start_pfn, kdsh->start_pfn);
			else
				fprintf(fp, "(unused)\n");
			fprintf(fp, " end_pfn: ");
			if (KDUMP_SPLIT())
				fprintf(fp, "%ld (0x%lx)\n",
					kdsh->end_pfn, kdsh->end_pfn);
			else
				fprintf(fp, "(unused)\n");
		}
		if (dh->header_version >= 3) {
			fprintf(fp, " offset_vmcoreinfo: %llu (0x%llx)\n",
				(ulonglong)dd->sub_header_kdump->offset_vmcoreinfo,
				(ulonglong)dd->sub_header_kdump->offset_vmcoreinfo);
			fprintf(fp, " size_vmcoreinfo: %lu (0x%lx)\n",
				dd->sub_header_kdump->size_vmcoreinfo,
				dd->sub_header_kdump->size_vmcoreinfo);
			if (dd->sub_header_kdump->offset_vmcoreinfo &&
			    dd->sub_header_kdump->size_vmcoreinfo) {
				dump_vmcoreinfo(fp);
			}
		}
		if (dh->header_version >= 4) {
			fprintf(fp, " offset_note: %llu (0x%llx)\n",
				(ulonglong)dd->sub_header_kdump->offset_note,
				(ulonglong)dd->sub_header_kdump->offset_note);
			fprintf(fp, " size_note: %lu (0x%lx)\n",
				dd->sub_header_kdump->size_note,
				dd->sub_header_kdump->size_note);
			fprintf(fp, " notes_buf: %lx\n", (ulong)dd->notes_buf);
			fprintf(fp, " num_prstatus_notes: %d\n",
				dd->num_prstatus_notes);
			for (i = 0; i < dd->num_prstatus_notes; i++) {
				fprintf(fp, " notes[%d]: %lx %s\n",
					i, (ulong)dd->nt_prstatus_percpu[i],
					dd->nt_prstatus_percpu[i] ?
					"(NT_PRSTATUS)" : "");
				display_ELF_note(dd->machine_type, PRSTATUS_NOTE,
					dd->nt_prstatus_percpu[i], fp);
			}
			fprintf(fp, " snapshot_task: %lx %s\n",
				dd->snapshot_task,
				dd->snapshot_task ? "(NT_TASKSTRUCT)" : "");
			fprintf(fp, " num_qemu_notes: %d\n",
				dd->num_qemu_notes);
			for (i = 0; i < dd->num_qemu_notes; i++) {
				fprintf(fp, " notes[%d]: %lx (QEMUCPUState)\n",
					i, (ulong)dd->nt_qemu_percpu[i]);
				display_ELF_note(dd->machine_type, QEMU_NOTE,
					dd->nt_qemu_percpu[i], fp);
			}
			dump_note_offsets(fp);
		}
		if (dh->header_version >= 5) {
			fprintf(fp, " offset_eraseinfo: %llu (0x%llx)\n",
				(ulonglong)dd->sub_header_kdump->offset_eraseinfo,
				(ulonglong)dd->sub_header_kdump->offset_eraseinfo);
			fprintf(fp, " size_eraseinfo: %lu (0x%lx)\n",
				dd->sub_header_kdump->size_eraseinfo,
				dd->sub_header_kdump->size_eraseinfo);
			if (dd->sub_header_kdump->offset_eraseinfo &&
			    dd->sub_header_kdump->size_eraseinfo) {
				dump_eraseinfo(fp);
			}
		}
		if (dh->header_version >= 6) {
			fprintf(fp, " start_pfn_64: ");
			if (KDUMP_SPLIT())
				fprintf(fp, "%lld (0x%llx)\n",
					kdsh->start_pfn_64, kdsh->start_pfn_64);
			else
				fprintf(fp, "(unused)\n");
			fprintf(fp, " end_pfn_64: ");
			if (KDUMP_SPLIT())
				fprintf(fp, "%lld (0x%llx)\n",
					kdsh->end_pfn_64, kdsh->end_pfn_64);
			else
				fprintf(fp, "(unused)\n");
			fprintf(fp, " max_mapnr_64: %llu (0x%llx)\n",
				kdsh->max_mapnr_64, kdsh->max_mapnr_64);
		}
		fprintf(fp, "\n");
	} else
		fprintf(fp, "(n/a)\n\n");

	fprintf(fp, " data_offset: %lx\n", (ulong)dd->data_offset);
	fprintf(fp, " block_size: %d\n", dd->block_size);
	fprintf(fp, " block_shift: %d\n", dd->block_shift);
	fprintf(fp, " bitmap: %lx\n", (ulong)dd->bitmap);
	fprintf(fp, " bitmap_len: %lld\n", (ulonglong)dd->bitmap_len);
	fprintf(fp, " max_mapnr: %lld (0x%llx)\n", dd->max_mapnr, dd->max_mapnr);
	fprintf(fp, " dumpable_bitmap: %lx\n", (ulong)dd->dumpable_bitmap);
	fprintf(fp, " byte: %d\n", dd->byte);
	fprintf(fp, " bit: %d\n", dd->bit);
	fprintf(fp, " compressed_page: %lx\n", (ulong)dd->compressed_page);
	fprintf(fp, " curbufptr: %lx\n\n", (ulong)dd->curbufptr);

	/* decompressed-page cache entries and their hit statistics */
	for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) {
		fprintf(fp, "%spage_cache_hdr[%d]:\n", i < 10 ? " " : "", i);
		fprintf(fp, " pg_flags: %x (", dd->page_cache_hdr[i].pg_flags);
		others = 0;
		if (dd->page_cache_hdr[i].pg_flags & PAGE_VALID)
			fprintf(fp, "%sPAGE_VALID", others++ ? "|" : "");
		fprintf(fp, ")\n");
		fprintf(fp, " pg_addr: %llx\n",
			(ulonglong)dd->page_cache_hdr[i].pg_addr);
		fprintf(fp, " pg_bufptr: %lx\n",
			(ulong)dd->page_cache_hdr[i].pg_bufptr);
		fprintf(fp, " pg_hit_count: %ld\n",
			dd->page_cache_hdr[i].pg_hit_count);
	}

	fprintf(fp, "\n page_cache_buf: %lx\n", (ulong)dd->page_cache_buf);
	fprintf(fp, " evict_index: %d\n", dd->evict_index);
	fprintf(fp, " evictions: %ld\n", dd->evictions);
	fprintf(fp, " accesses: %ld\n", dd->accesses);
	fprintf(fp, " cached_reads: %ld ", dd->cached_reads);
	if (dd->accesses)
		fprintf(fp, "(%ld%%)\n",
			dd->cached_reads * 100 / dd->accesses);
	else
		fprintf(fp, "\n");
	fprintf(fp, " valid_pages: %lx\n", (ulong)dd->valid_pages);

	return 0;
}

/*
 * Wrapper of __diskdump_memory_dump()
 *
 * For split kdump dumpfiles, iterate over every member file (switching the
 * global dd pointer); otherwise dump the single current file.
 */
int diskdump_memory_dump(FILE *fp)
{
	int i;

	if (KDUMP_SPLIT() && (dd_list != NULL))
		for (i = 0; i < num_dumpfiles; i++) {
			dd = dd_list[i];
			__diskdump_memory_dump(fp);
			fprintf(fp, "\n");
		}
	else
		__diskdump_memory_dump(fp);

	return 0;
}

/*
 * Get the switch_stack address of the passed-in task.
 * Diskdump has no such notion, so the task argument is unused and the
 * function always returns 0.
 */
ulong get_diskdump_switch_stack(ulong task)
{
	return 0;
}

/*
 * Versions of disk_dump that support it contain the "dump_level" symbol.
 * Version 1 and later compressed kdump dumpfiles contain the dump level
 * in an additional field of the sub_header_kdump structure.
*/ static int get_dump_level(void) { int dump_level; if (DISKDUMP_VALID()) { if (symbol_exists("dump_level") && readmem(symbol_value("dump_level"), KVADDR, &dump_level, sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) return dump_level; } else if (KDUMP_CMPRS_VALID()) { if (dd->header->header_version >= 1) return dd->sub_header_kdump->dump_level; } return -1; } /* * Used by the "sys" command to display [PARTIAL DUMP] * after the dumpfile name. */ int is_partial_diskdump(void) { return (get_dump_level() > 0 ? TRUE : FALSE); } /* * Used by "sys" command to dump multiple split dumpfiles. */ void show_split_dumpfiles(void) { int i; struct diskdump_data *ddp; struct disk_dump_header *dh; for (i = 0; i < num_dumpfiles; i++) { ddp = dd_list[i]; dh = ddp->header; fprintf(fp, "%s%s%s%s%s", i ? " " : "", ddp->filename, is_partial_diskdump() ? " [PARTIAL DUMP]" : "", dh->status & DUMP_DH_COMPRESSED_INCOMPLETE ? " [INCOMPLETE]" : "", dh->status & DUMP_DH_EXCLUDED_VMEMMAP ? " [EXCLUDED VMEMMAP]" : ""); if ((i+1) < num_dumpfiles) fprintf(fp, "\n"); } } void * diskdump_get_prstatus_percpu(int cpu) { int online; if ((cpu < 0) || (cpu >= dd->num_prstatus_notes)) return NULL; /* * If no cpu mapping was done, then there must be * a one-to-one relationship between the number * of online cpus and the number of notes. */ if ((online = get_cpus_online()) && (online == kt->cpus) && (online != dd->num_prstatus_notes)) return NULL; return dd->nt_prstatus_percpu[cpu]; } /* * Reads a string value from VMCOREINFO. * * Returns a string (that has to be freed by the caller) that contains the * value for key or NULL if the key has not been found. 
*/ static char * vmcoreinfo_read_string(const char *key) { char *buf, *value_string, *p1, *p2; size_t value_length; ulong size_vmcoreinfo; off_t offset; char keybuf[BUFSIZE]; const off_t failed = (off_t)-1; if (dd->header->header_version < 3) return NULL; buf = value_string = NULL; size_vmcoreinfo = dd->sub_header_kdump->size_vmcoreinfo; offset = dd->sub_header_kdump->offset_vmcoreinfo; sprintf(keybuf, "%s=", key); if ((buf = malloc(size_vmcoreinfo+1)) == NULL) { error(INFO, "compressed kdump: cannot malloc vmcoreinfo" " buffer\n"); goto err; } if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, buf, size_vmcoreinfo)) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump vmcoreinfo\n"); goto err; } if (read(dd->dfd, buf, size_vmcoreinfo) < size_vmcoreinfo) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } buf[size_vmcoreinfo] = '\n'; if ((p1 = strstr(buf, keybuf))) { p2 = p1 + strlen(keybuf); p1 = strstr(p2, "\n"); value_length = p1-p2; value_string = calloc(value_length+1, sizeof(char)); strncpy(value_string, p2, value_length); value_string[value_length] = NULLCHAR; } err: if (buf) free(buf); return value_string; } static void diskdump_get_osrelease(void) { char *string; if ((string = vmcoreinfo_read_string("OSRELEASE"))) { fprintf(fp, "%s\n", string); free(string); } else pc->flags2 &= ~GET_OSRELEASE; } static int valid_note_address(unsigned char *offset) { if (offset > (dd->notes_buf + dd->sub_header_kdump->size_note)) return FALSE; return TRUE; } void diskdump_display_regs(int cpu, FILE *ofp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; char *user_regs; size_t len; if ((cpu < 0) || (cpu >= dd->num_prstatus_notes) || (dd->nt_prstatus_percpu[cpu] == NULL)) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } if (machine_type("X86_64")) { note64 = dd->nt_prstatus_percpu[cpu]; 
len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } user_regs = (char *)note64 + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)) ); } if (machine_type("PPC64")) { struct ppc64_elf_prstatus *prs; struct ppc64_pt_regs *pr; note64 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note 
for cpu %d\n", cpu); return; } prs = (struct ppc64_elf_prstatus *) ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); pr = &prs->pr_reg; fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: %016lx\n" " R15: %016lx R16: %016lx R16: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" " R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " NIP: %016lx MSR: %016lx\n" " OGPR3: %016lx CTR: %016lx\n" " LINK: %016lx XER: %016lx\n" " CCR: %016lx MQ: %016lx\n" " TRAP: %016lx DAR: %016lx\n" " DSISR: %016lx RESULT: %016lx\n", pr->gpr[0], pr->gpr[1], pr->gpr[2], pr->gpr[3], pr->gpr[4], pr->gpr[5], pr->gpr[6], pr->gpr[7], pr->gpr[8], pr->gpr[9], pr->gpr[10], pr->gpr[11], pr->gpr[12], pr->gpr[13], pr->gpr[14], pr->gpr[15], pr->gpr[16], pr->gpr[17], pr->gpr[18], pr->gpr[19], pr->gpr[20], pr->gpr[21], pr->gpr[22], pr->gpr[23], pr->gpr[24], pr->gpr[25], pr->gpr[26], pr->gpr[27], pr->gpr[28], pr->gpr[29], pr->gpr[30], pr->gpr[31], pr->nip, pr->msr, pr->orig_gpr3, pr->ctr, pr->link, pr->xer, pr->ccr, pr->mq, pr->trap, pr->dar, pr->dsisr, pr->result); } if (machine_type("ARM64")) { note64 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); fprintf(ofp, " X0: %016lx X1: %016lx X2: %016lx\n" " X3: %016lx X4: %016lx X5: %016lx\n" " X6: %016lx X7: %016lx X8: %016lx\n" " X9: %016lx X10: %016lx X11: %016lx\n" " X12: %016lx X13: %016lx X14: %016lx\n" " X15: %016lx X16: %016lx X17: %016lx\n" " 
X18: %016lx X19: %016lx X20: %016lx\n" " X21: %016lx X22: %016lx X23: %016lx\n" " X24: %016lx X25: %016lx X26: %016lx\n" " X27: %016lx X28: %016lx X29: %016lx\n" " LR: %016lx SP: %016lx PC: %016lx\n" " PSTATE: %08lx FPVALID: %08x\n", ULONG(user_regs + sizeof(ulong) * 0), ULONG(user_regs + sizeof(ulong) * 1), ULONG(user_regs + sizeof(ulong) * 2), ULONG(user_regs + sizeof(ulong) * 3), ULONG(user_regs + sizeof(ulong) * 4), ULONG(user_regs + sizeof(ulong) * 5), ULONG(user_regs + sizeof(ulong) * 6), ULONG(user_regs + sizeof(ulong) * 7), ULONG(user_regs + sizeof(ulong) * 8), ULONG(user_regs + sizeof(ulong) * 9), ULONG(user_regs + sizeof(ulong) * 10), ULONG(user_regs + sizeof(ulong) * 11), ULONG(user_regs + sizeof(ulong) * 12), ULONG(user_regs + sizeof(ulong) * 13), ULONG(user_regs + sizeof(ulong) * 14), ULONG(user_regs + sizeof(ulong) * 15), ULONG(user_regs + sizeof(ulong) * 16), ULONG(user_regs + sizeof(ulong) * 17), ULONG(user_regs + sizeof(ulong) * 18), ULONG(user_regs + sizeof(ulong) * 19), ULONG(user_regs + sizeof(ulong) * 20), ULONG(user_regs + sizeof(ulong) * 21), ULONG(user_regs + sizeof(ulong) * 22), ULONG(user_regs + sizeof(ulong) * 23), ULONG(user_regs + sizeof(ulong) * 24), ULONG(user_regs + sizeof(ulong) * 25), ULONG(user_regs + sizeof(ulong) * 26), ULONG(user_regs + sizeof(ulong) * 27), ULONG(user_regs + sizeof(ulong) * 28), ULONG(user_regs + sizeof(ulong) * 29), ULONG(user_regs + sizeof(ulong) * 30), ULONG(user_regs + sizeof(ulong) * 31), ULONG(user_regs + sizeof(ulong) * 32), ULONG(user_regs + sizeof(ulong) * 33), UINT(user_regs + sizeof(ulong) * 34)); } if (machine_type("X86")) { note32 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = (char *)note32 + len - SIZE(user_regs_struct) - sizeof(int); if (!valid_note_address((unsigned char *)note32 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } fprintf(ofp, " EAX: %08x EBX: 
%08x ECX: %08x EDX: %08x\n" " ESP: %08x EIP: %08x ESI: %08x EDI: %08x\n" " CS: %04x DS: %04x ES: %04x FS: %04x\n" " GS: %04x SS: %04x\n" " EBP: %08x EFLAGS: %08x\n", UINT(user_regs + OFFSET(user_regs_struct_eax)), UINT(user_regs + OFFSET(user_regs_struct_ebx)), UINT(user_regs + OFFSET(user_regs_struct_ecx)), UINT(user_regs + OFFSET(user_regs_struct_edx)), UINT(user_regs + OFFSET(user_regs_struct_esp)), UINT(user_regs + OFFSET(user_regs_struct_eip)), UINT(user_regs + OFFSET(user_regs_struct_esi)), UINT(user_regs + OFFSET(user_regs_struct_edi)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ds)), USHORT(user_regs + OFFSET(user_regs_struct_es)), USHORT(user_regs + OFFSET(user_regs_struct_fs)), USHORT(user_regs + OFFSET(user_regs_struct_gs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)), UINT(user_regs + OFFSET(user_regs_struct_ebp)), UINT(user_regs + OFFSET(user_regs_struct_eflags)) ); } } void dump_registers_for_compressed_kdump(void) { int c; if (!KDUMP_CMPRS_VALID() || (dd->header->header_version < 4) || !(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64") || machine_type("PPC64"))) error(FATAL, "-r option not supported for this dumpfile\n"); if (machine_type("ARM64") && (kt->cpus != dd->num_prstatus_notes)) fprintf(fp, "NOTE: cpus: %d NT_PRSTATUS notes: %d " "(note-to-cpu mapping is questionable)\n\n", kt->cpus, dd->num_prstatus_notes); for (c = 0; c < kt->cpus; c++) { if (hide_offline_cpu(c)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", c ? "\n" : "", c); continue; } else fprintf(fp, "%sCPU %d:\n", c ? 
"\n" : "", c); diskdump_display_regs(c, fp); } } crash-7.1.4/sadump.c0000664000000000000000000012540712634305150012770 0ustar rootroot/* * sadump.h - core analysis suite * * Copyright (c) 2011 FUJITSU LIMITED * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: HATAYAMA Daisuke */ #include "defs.h" #include "sadump.h" #include /* htonl, htons */ #include #include enum { failed = -1 }; static struct sadump_data sadump_data = { 0 }; static struct sadump_data *sd = &sadump_data; static int read_device(void *buf, size_t bytes, ulong *offset); static int read_dump_header(char *file); static int add_disk(char *file); static int open_dump_file(char *file); static int open_disk(char *file); static uint64_t paddr_to_pfn(physaddr_t paddr); static inline int is_set_bit(char *bitmap, uint64_t pfn); static inline int page_is_ram(uint64_t nr); static inline int page_is_dumpable(uint64_t nr); static int lookup_diskset(uint64_t whole_offset, int *diskid, uint64_t *disk_offset); static struct tm *efi_time_t_to_tm(const efi_time_t *e); static char * guid_to_str(efi_guid_t *guid, char *buf, size_t buflen); static int verify_magic_number(uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]); static ulong per_cpu_ptr(ulong ptr, int cpu); static ulong early_per_cpu_ptr(char *symbol, struct syment *sym, int cpu); static ulong legacy_per_cpu_ptr(ulong ptr, int cpu); static int get_prstatus_from_crash_notes(int cpu, char *prstatus); static void display_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *s); static int cpu_to_apicid(int cpu, 
int *apicid); static int get_sadump_smram_cpu_state(int cpu, struct sadump_smram_cpu_state *smram); static int block_table_init(void); static uint64_t pfn_to_block(uint64_t pfn); struct sadump_data * sadump_get_sadump_data(void) { if (!SADUMP_VALID() || !SADUMP_DUMPFILE()) return NULL; return &sadump_data; } int sadump_cleanup_sadump_data(void) { int i; if (!SADUMP_VALID() || !SADUMP_DUMPFILE()) return FALSE; if (sd->flags & SADUMP_DISKSET) { for (i = 1; i < sd->sd_list_len; ++i) { if (sd->sd_list[i]->dfd) close(sd->sd_list[i]->dfd); free(sd->sd_list[i]->header); free(sd->sd_list[i]); } } close(sd->dfd); free(sd->header); free(sd->dump_header); free(sd->diskset_header); free(sd->bitmap); free(sd->dumpable_bitmap); free(sd->page_buf); free(sd->block_table); if (sd->sd_list[0]) free(sd->sd_list[0]); free(sd->sd_list); memset(&sadump_data, 0, sizeof(sadump_data)); pc->flags &= ~SADUMP; pc->dumpfile = NULL; pc->readmem = NULL; pc->writemem = NULL; return TRUE; } static int read_device(void *buf, size_t bytes, ulong *offset) { if (lseek(sd->dfd, *offset, SEEK_SET) == failed) { error(INFO, "sadump: cannot lseek dump device\n"); return FALSE; } if (read(sd->dfd, buf, bytes) < bytes) { error(INFO, "sadump: cannot read dump device\n"); return FALSE; } *offset += bytes; return TRUE; } static int read_dump_header(char *file) { struct sadump_part_header *sph = NULL; struct sadump_header *sh = NULL; struct sadump_disk_set_header *new, *sdh = NULL; struct sadump_media_header *smh = NULL; struct sadump_diskset_data *sd_list_len_0 = NULL; size_t block_size = SADUMP_DEFAULT_BLOCK_SIZE; ulong flags = 0; ulong offset = 0, sub_hdr_offset, data_offset; uint32_t smram_cpu_state_size = 0; ulong bitmap_len, dumpable_bitmap_len; char *bitmap = NULL, *dumpable_bitmap = NULL, *page_buf = NULL; char guid1[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; char guid2[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; sph = malloc(block_size); if (!sph) { error(INFO, "sadump: cannot allocate partition header buffer\n"); goto 
err; } sdh = malloc(block_size); if (!sdh) { error(INFO, "sadump: cannot allocate disk set header buffer\n"); goto err; } sh = malloc(block_size); if (!sh) { error(INFO, "sadump: cannot allocate dump header buffer\n"); goto err; } smh = malloc(block_size); if (!smh) { error(INFO, "sadump: cannot allocate media header buffer\n"); goto err; } restart: if (block_size < 0) return FALSE; if (!read_device(sph, block_size, &offset)) { error(INFO, "sadump: cannot read partition header\n"); goto err; } if (sph->signature1 != SADUMP_SIGNATURE1 || sph->signature2 != SADUMP_SIGNATURE2) { flags |= SADUMP_MEDIA; if (CRASHDEBUG(1)) error(INFO, "sadump: read dump device as media " "format\n"); offset = 0; if (!read_device(smh, block_size, &offset)) { error(INFO, "sadump: cannot read media header\n"); goto err; } if (!read_device(sph, block_size, &offset)) { error(INFO, "sadump: cannot read partition header\n"); goto err; } if (sph->signature1 != SADUMP_SIGNATURE1 || sph->signature2 != SADUMP_SIGNATURE2) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have partition " "header\n"); goto err; } } if (!verify_magic_number(sph->magicnum)) { error(INFO, "sadump: invalid magic number\n"); goto err; } if (!(flags & SADUMP_MEDIA) && sph->set_disk_set) { uint32_t header_blocks; size_t header_size; flags |= SADUMP_DISKSET; if (CRASHDEBUG(1)) error(INFO, "sadump: read dump device as diskset\n"); if (sph->set_disk_set != 1 || sph->set_disk_set > SADUMP_MAX_DISK_SET_NUM) { if (CRASHDEBUG(1)) error(INFO, "sadump: invalid disk set number: " "%d\n", sph->set_disk_set); goto err; } if (!read_device(&header_blocks, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read disk set header " "size\n"); goto err; } offset -= sizeof(uint32_t); header_size = header_blocks * block_size; if (header_size > block_size) { new = realloc(sdh, header_size); if (!new) { error(INFO, "sadump: cannot re-allocate disk " "set buffer\n"); goto err; } sdh = new; } if (!read_device(sdh, header_size, &offset)) { 
error(INFO, "sadump: cannot read disk set header\n"); goto err; } } if (!read_device(sh, block_size, &offset)) { error(INFO, "sadump: cannot read dump header\n"); goto err; } sub_hdr_offset = offset; if (strncmp(sh->signature, SADUMP_SIGNATURE, 8) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have dump header\n"); goto err; } if (flags & SADUMP_MEDIA) { if (memcmp(&sph->sadump_id, &smh->sadump_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: system ID mismatch\n" " partition header: %s\n" " media header: %s\n", guid_to_str(&sph->sadump_id, guid1, sizeof(guid1)), guid_to_str(&smh->sadump_id, guid2, sizeof(guid2))); goto err; } if (memcmp(&sph->disk_set_id, &smh->disk_set_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: disk set ID mismatch\n" " partition header: %s\n" " media header: %s\n", guid_to_str(&sph->disk_set_id, guid1, sizeof(guid1)), guid_to_str(&smh->disk_set_id, guid2, sizeof(guid2))); goto err; } if (memcmp(&sph->time_stamp, &smh->time_stamp, sizeof(efi_time_t)) != 0) { if (CRASHDEBUG(1)) { error(INFO, "sadump: time stamp mismatch\n"); error(INFO, "sadump: partition header: %s\n", strip_linefeeds(asctime (efi_time_t_to_tm (&sph->time_stamp)))); error(INFO, "sadump: media header: %s\n", strip_linefeeds(asctime (efi_time_t_to_tm (&smh->time_stamp)))); } } if (smh->sequential_num != 1) { error(INFO, "sadump: first media file has sequential " "number %d\n", smh->sequential_num); goto err; } } if (sh->block_size != block_size) { block_size = sh->block_size; offset = 0; goto restart; } if (CRASHDEBUG(1)) { if (flags & SADUMP_MEDIA) error(INFO, "sadump: media backup file\n"); else if (flags & SADUMP_DISKSET) error(INFO, "sadump: diskset configuration with %d " "disks\n", sdh->disk_num); else error(INFO, "sadump: single partition " "configuration\n"); } flags |= SADUMP_LOCAL; switch (sh->header_version) { case 0: sd->max_mapnr = (uint64_t)sh->max_mapnr; break; default: error(WARNING, "sadump: unsupported 
header version: %u\n" "sadump: assuming header version: 1\n", sh->header_version); case 1: sd->max_mapnr = sh->max_mapnr_64; break; } if (sh->sub_hdr_size > 0) { if (!read_device(&smram_cpu_state_size, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read SMRAM CPU STATE size\n"); goto err; } smram_cpu_state_size /= sh->nr_cpus; offset -= sizeof(uint32_t); offset += sh->sub_hdr_size * block_size; } if (!sh->bitmap_blocks) { error(INFO, "sadump: bitmap_blocks is zero\n"); goto err; } bitmap_len = block_size * sh->bitmap_blocks; bitmap = calloc(bitmap_len, 1); if (!bitmap) { error(INFO, "sadump: cannot allocate memory for bitmap " "buffer\n"); goto err; } if (!read_device(bitmap, bitmap_len, &offset)) { error(INFO, "sadump: cannot read bitmap\n"); goto err; } if (!sh->dumpable_bitmap_blocks) { error(INFO, "sadump: dumpable_bitmap_blocks is zero\n"); goto err; } dumpable_bitmap_len = block_size * sh->dumpable_bitmap_blocks; dumpable_bitmap = calloc(dumpable_bitmap_len, 1); if (!dumpable_bitmap) { error(INFO, "sadump: cannot allocate memory for " "dumpable_bitmap buffer\n"); goto err; } if (!read_device(dumpable_bitmap, dumpable_bitmap_len, &offset)) { error(INFO, "sadump: cannot read dumpable bitmap\n"); goto err; } data_offset = offset; page_buf = malloc(block_size); if (!page_buf) { error(INFO, "sadump: cannot allocate page buffer\n"); goto err; } sd->filename = file; sd->flags = flags; if (machine_type("X86")) sd->machine_type = EM_386; else if (machine_type("X86_64")) sd->machine_type = EM_X86_64; else { error(INFO, "sadump: unsupported machine type: %s\n", MACHINE_TYPE); goto err; } sd->data_offset = data_offset; sd->block_size = block_size; sd->block_shift = ffs(sd->block_size) - 1; sd->bitmap = bitmap; sd->dumpable_bitmap = dumpable_bitmap; sd->sub_hdr_offset = sub_hdr_offset; sd->smram_cpu_state_size = smram_cpu_state_size; sd->header = sph; sd->dump_header = sh; if (flags & SADUMP_DISKSET) sd->diskset_header = sdh; if (flags & SADUMP_MEDIA) 
sd->media_header = smh; sd->page_buf = page_buf; if (flags & SADUMP_DISKSET) { sd_list_len_0 = malloc(sizeof(struct sadump_diskset_data)); if (!sd_list_len_0) { error(INFO, "sadump: cannot allocate diskset data buffer\n"); goto err; } sd_list_len_0->filename = sd->filename; sd_list_len_0->dfd = sd->dfd; sd_list_len_0->header = sd->header; sd_list_len_0->data_offset = sd->data_offset; sd->sd_list = malloc(sizeof(struct sadump_diskset_data *)); if (!sd->sd_list) { error(INFO, "sadump: cannot allocate diskset list buffer\n"); goto err; } sd->sd_list_len = 1; sd->sd_list[0] = sd_list_len_0; } if (!block_table_init()) { error(INFO, "sadump: cannot initialize block hash table\n"); goto err; } if (!(flags & SADUMP_DISKSET)) free(sdh); if (!(flags & SADUMP_MEDIA)) free(smh); return TRUE; err: close(sd->dfd); free(sph); free(sdh); free(sh); free(smh); free(bitmap); free(dumpable_bitmap); free(page_buf); free(sd_list_len_0); free(sd->sd_list); return FALSE; } static int add_disk(char *file) { struct sadump_part_header *ph; struct sadump_diskset_data *this_disk; int diskid; char guid1[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; char guid2[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; diskid = sd->sd_list_len - 1; this_disk = sd->sd_list[diskid]; if (CRASHDEBUG(1)) error(INFO, "sadump: add disk #%d\n", diskid+1); ph = malloc(sd->block_size); if (!ph) { error(INFO, "sadump: cannot malloc block_size buffer\n"); return FALSE; } if (lseek(this_disk->dfd, 0, SEEK_SET) == failed) { error(INFO, "sadump: cannot lseek dump partition header\n"); free(ph); return FALSE; } if (read(this_disk->dfd, ph, sd->block_size) < sd->block_size) { error(INFO, "sadump: cannot read dump partition header\n"); free(ph); return FALSE; } if (ph->signature1 != SADUMP_SIGNATURE1 || ph->signature2 != SADUMP_SIGNATURE2) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have partition header\n"); free(ph); return FALSE; } if (memcmp(&sd->header->sadump_id, &ph->sadump_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, 
			      "sadump: system ID mismatch\n"
			      " partition header on disk #1: %s\n"
			      " partition header on disk #%d: %s\n",
			      guid_to_str(&sd->header->sadump_id, guid1,
					  sizeof(guid1)),
			      diskid+1,
			      guid_to_str(&ph->sadump_id, guid2,
					  sizeof(guid2)));
		free(ph);
		return FALSE;
	}

	if (memcmp(&sd->header->disk_set_id, &ph->disk_set_id,
		   sizeof(efi_guid_t)) != 0) {
		if (CRASHDEBUG(1))
			error(INFO,
			      "sadump: disk set ID mismatch\n"
			      " partition header on disk #1: %s\n"
			      " partition header on disk #%d: %s\n",
			      guid_to_str(&sd->header->disk_set_id, guid1,
					  sizeof(guid1)),
			      diskid+1,
			      guid_to_str(&ph->disk_set_id, guid2,
					  sizeof(guid2)));
		free(ph);
		return FALSE;
	}

	/* vol_info[] describes disks 2..N of the set, hence diskid-1 */
	if (memcmp(&sd->diskset_header->vol_info[diskid - 1].id,
		   &ph->vol_id,
		   sizeof(efi_guid_t)) != 0) {
		if (CRASHDEBUG(1))
			error(INFO,
			      "sadump: volume ID mismatch\n"
			      " disk set header on disk #1: %s\n"
			      " partition header on disk #%d: %s\n",
			      guid_to_str(&sd->diskset_header->vol_info[diskid-1].id,
					  guid1, sizeof(guid1)),
			      diskid+1,
			      guid_to_str(&ph->vol_id, guid2, sizeof(guid2)));
		free(ph);
		return FALSE;
	}

	/* a time-stamp mismatch is reported but tolerated */
	if (memcmp(&sd->header->time_stamp, &ph->time_stamp,
		   sizeof(efi_time_t)) != 0) {
		if (CRASHDEBUG(1)) {
			error(INFO, "sadump: time stamp mismatch\n");
			error(INFO,
			      "sadump: partition header on disk #1: %s\n",
			      strip_linefeeds(asctime
					      (efi_time_t_to_tm
					       (&sd->header->time_stamp))));
			error(INFO,
			      "sadump: partition header on disk #%d: %s\n",
			      diskid+1,
			      strip_linefeeds(asctime
					      (efi_time_t_to_tm
					       (&ph->time_stamp))));
		}
	}

	/* the set_disk_set field records this disk's 1-based position */
	if (diskid != ph->set_disk_set - 1) {
		if (CRASHDEBUG(1))
			error(INFO, "sadump: wrong disk order; "
			      "#%d expected but #%d given\n",
			      diskid+1, ph->set_disk_set);
		free(ph);
		return FALSE;
	}

	this_disk->header = ph;
	this_disk->data_offset = sd->block_size;
	this_disk->filename = file;

	return TRUE;
}

/*
 * Open the first sadump file/partition and record its descriptor in the
 * global sadump state.  Returns TRUE/FALSE.
 */
static int
open_dump_file(char *file)
{
	int fd;

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		/* NOTE(review): message lacks a trailing newline */
		error(INFO, "sadump: unable to open dump file %s", file);
		return FALSE;
	}
	sd->dfd = fd;
	return TRUE;
}

/*
 * Open an additional member disk of a diskset and append a new entry to
 * the sd_list array.
 */
static int
open_disk(char *file)
{
	struct sadump_diskset_data *this_disk;
sd->sd_list_len++; if (CRASHDEBUG(1)) error(INFO, "sadump: open disk #%d\n", sd->sd_list_len); if (sd->sd_list_len > sd->diskset_header->disk_num) { error(INFO, "sadump: too many diskset arguments; " "this diskset consists of %d disks\n", sd->diskset_header->disk_num); return FALSE; } sd->sd_list = realloc(sd->sd_list, sd->sd_list_len * sizeof(struct sadump_diskset_data *)); if (!sd->sd_list) { if (CRASHDEBUG(1)) { error(INFO, "sadump: cannot malloc diskset list buffer\n"); } return FALSE; } this_disk = malloc(sizeof(struct sadump_diskset_data)); if (!this_disk) { if (CRASHDEBUG(1)) { error(INFO, "sadump: cannot malloc diskset data buffer\n"); } return FALSE; } memset(this_disk, 0, sizeof(*this_disk)); sd->sd_list[sd->sd_list_len - 1] = this_disk; this_disk->dfd = open(file, O_RDONLY); if (!this_disk->dfd) { free(this_disk); error(INFO, "sadump: unable to open dump file %s", file); return FALSE; } return TRUE; } int is_sadump(char *file) { if (SADUMP_VALID()) { if (!(sd->flags & SADUMP_DISKSET)) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not support multiple" " file formats\n"); (void) sadump_cleanup_sadump_data(); return FALSE; } if (!open_disk(file) || !add_disk(file)) { (void) sadump_cleanup_sadump_data(); return FALSE; } return TRUE; } if (!open_dump_file(file) || !read_dump_header(file)) return FALSE; return TRUE; } int sadump_is_diskset(void) { if (!SADUMP_VALID()) return FALSE; return !!(sd->flags & SADUMP_DISKSET); } uint sadump_page_size(void) { return sd->dump_header->block_size; } /* * Translate physical address in paddr to PFN number. This means normally that * we just shift paddr by some constant. 
*/ static uint64_t paddr_to_pfn(physaddr_t paddr) { return paddr >> sd->block_shift; } static inline int is_set_bit(char *bitmap, uint64_t pfn) { ulong index, bit; index = pfn >> 3; bit = 7 - (pfn & 7); return !!(bitmap[index] & (1UL << bit)); } static inline int page_is_ram(uint64_t nr) { return is_set_bit(sd->bitmap, nr); } static inline int page_is_dumpable(uint64_t nr) { return is_set_bit(sd->dumpable_bitmap, nr); } static int lookup_diskset(uint64_t whole_offset, int *diskid, uint64_t *disk_offset) { uint64_t offset = whole_offset; int i; for (i = 0; i < sd->sd_list_len; ++i) { uint64_t used_device_i, ram_size; ulong data_offset_i; used_device_i = sd->sd_list[i]->header->used_device; data_offset_i = sd->sd_list[i]->data_offset; ram_size = used_device_i - data_offset_i; if (offset < ram_size) break; offset -= ram_size; } if (i == sd->sd_list_len) return FALSE; *diskid = i; *disk_offset = offset; return TRUE; } int read_sadump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { physaddr_t curpaddr ATTRIBUTE_UNUSED; uint64_t pfn, whole_offset, perdisk_offset, block; ulong page_offset; int dfd; if (sd->flags & SADUMP_KDUMP_BACKUP && paddr >= sd->backup_src_start && paddr < sd->backup_src_start + sd->backup_src_size) { ulong orig_paddr; orig_paddr = paddr; paddr += sd->backup_offset - sd->backup_src_start; if (CRASHDEBUG(1)) error(INFO, "sadump: kdump backup region: %#llx => %#llx\n", orig_paddr, paddr); } pfn = paddr_to_pfn(paddr); curpaddr = paddr & ~((physaddr_t)(sd->block_size-1)); page_offset = paddr & ((physaddr_t)(sd->block_size-1)); if ((pfn >= sd->max_mapnr) || !page_is_ram(pfn)) return SEEK_ERROR; if (!page_is_dumpable(pfn)) { if (!(sd->flags & SADUMP_ZERO_EXCLUDED)) return PAGE_EXCLUDED; memset(bufptr, 0, cnt); return cnt; } block = pfn_to_block(pfn); whole_offset = block * sd->block_size; if (sd->flags & SADUMP_DISKSET) { int diskid; if (!lookup_diskset(whole_offset, &diskid, &perdisk_offset)) return SEEK_ERROR; dfd = 
sd->sd_list[diskid]->dfd; perdisk_offset += sd->sd_list[diskid]->data_offset; } else { dfd = sd->dfd; perdisk_offset = whole_offset + sd->data_offset; } if (lseek(dfd, perdisk_offset, SEEK_SET) == failed) return SEEK_ERROR; if (read(dfd, sd->page_buf, sd->block_size) != sd->block_size) return READ_ERROR; memcpy(bufptr, sd->page_buf + page_offset, cnt); return cnt; } int write_sadump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return 0; } int sadump_init(char *unused, FILE *fptr) { if (!SADUMP_VALID()) return FALSE; return TRUE; } ulong get_sadump_panic_task(void) { return NO_TASK; } ulong get_sadump_switch_stack(ulong task) { return 0; } static struct tm * efi_time_t_to_tm(const efi_time_t *e) { static struct tm t; time_t ti; memset(&t, 0, sizeof(t)); t.tm_sec = e->second; t.tm_min = e->minute; t.tm_hour = e->hour; t.tm_mday = e->day; t.tm_mon = e->month - 1; t.tm_year = e->year - 1900; if (e->timezone != EFI_UNSPECIFIED_TIMEZONE) t.tm_hour += e->timezone; else if (CRASHDEBUG(1)) error(INFO, "sadump: timezone information is missing\n"); ti = mktime(&t); if (ti == (time_t)-1) return &t; return localtime_r(&ti, &t); } static char * guid_to_str(efi_guid_t *guid, char *buf, size_t buflen) { snprintf(buf, buflen, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", htonl(guid->data1), htons(guid->data2), htons(guid->data3), guid->data4[0], guid->data4[1], guid->data4[2], guid->data4[3], guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]); return buf; } static int verify_magic_number(uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]) { int i; for (i = 1; i < DUMP_PART_HEADER_MAGICNUM_SIZE; ++i) if (magicnum[i] != (magicnum[i - 1] + 7) * 11) return FALSE; return TRUE; } int sadump_memory_used(void) { return 0; } int sadump_free_memory(void) { return 0; } /* * This function is dump-type independent, and could be used to dump * the diskdump_data structure contents and perhaps the sadump header * data. 
*/ int sadump_memory_dump(FILE *fp) { struct sadump_part_header *sph; struct sadump_disk_set_header *sdh; struct sadump_header *sh; struct sadump_media_header *smh; int i, others; char guid[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; fprintf(fp, "sadump_data: \n"); fprintf(fp, " filename: %s\n", sd->filename); fprintf(fp, " flags: %lx (", sd->flags); others = 0; if (sd->flags & SADUMP_LOCAL) fprintf(fp, "%sSADUMP_LOCAL", others++ ? "|" : ""); if (sd->flags & SADUMP_DISKSET) fprintf(fp, "%sSADUMP_DISKSET", others++ ? "|" : ""); if (sd->flags & SADUMP_MEDIA) fprintf(fp, "%sSADUMP_MEDIA", others++ ? "|" : ""); if (sd->flags & SADUMP_ZERO_EXCLUDED) fprintf(fp, "%sSADUMP_ZERO_EXCLUDED", others++ ? "|" : ""); if (sd->flags & SADUMP_KDUMP_BACKUP) fprintf(fp, "%sSADUMP_KDUMP_BACKUP", others++ ? "|" : ""); fprintf(fp, ") \n"); fprintf(fp, " dfd: %d\n", sd->dfd); fprintf(fp, " machine_type: %d ", sd->machine_type); switch (sd->machine_type) { case EM_386: fprintf(fp, "(EM_386)\n"); break; case EM_X86_64: fprintf(fp, "(EM_X86_64)\n"); break; default: fprintf(fp, "(unknown)\n"); break; } fprintf(fp, "\n header: %lx\n", (ulong)sd->header); sph = sd->header; fprintf(fp, " signature1: %x\n", sph->signature1); fprintf(fp, " signature2: %x\n", sph->signature2); fprintf(fp, " enable: %u\n", sph->enable); fprintf(fp, " reboot: %u\n", sph->reboot); fprintf(fp, " compress: %u\n", sph->compress); fprintf(fp, " recycle: %u\n", sph->recycle); fprintf(fp, " label: (unused)\n"); fprintf(fp, " sadump_id: %s\n", guid_to_str(&sph->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&sph->disk_set_id, guid, sizeof(guid))); fprintf(fp, " vol_id: %s\n", guid_to_str(&sph->vol_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sph->time_stamp)))); fprintf(fp, " set_disk_set: %u\n", sph->set_disk_set); fprintf(fp, " reserve: %u\n", sph->reserve); fprintf(fp, " used_device: %llu\n", (ulonglong)sph->used_device); fprintf(fp, " 
magicnum: %s\n", verify_magic_number(sph->magicnum) ? "(valid)" : "(invalid)"); fprintf(fp, "\n dump header: %lx\n", (ulong)sd->dump_header); sh = sd->dump_header; fprintf(fp, " signature: %s\n", sh->signature); fprintf(fp, " header_version: %u\n", sh->header_version); fprintf(fp, " reserve: %u\n", sh->reserve); fprintf(fp, " timestamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sh->timestamp)))); fprintf(fp, " status: %u\n", sh->status); fprintf(fp, " compress: %u\n", sh->compress); fprintf(fp, " block_size: %u\n", sh->block_size); fprintf(fp, " extra_hdr_size: %u\n", sh->extra_hdr_size); fprintf(fp, " sub_hdr_size: %u\n", sh->sub_hdr_size); fprintf(fp, " bitmap_blocks: %u\n", sh->bitmap_blocks); fprintf(fp, "dumpable_bitmap_blocks: %u\n", sh->dumpable_bitmap_blocks); fprintf(fp, " max_mapnr: %u\n", sh->max_mapnr); fprintf(fp, " total_ram_blocks: %u\n", sh->total_ram_blocks); fprintf(fp, " device_blocks: %u\n", sh->device_blocks); fprintf(fp, " written_blocks: %u\n", sh->written_blocks); fprintf(fp, " current_cpu: %u\n", sh->current_cpu); fprintf(fp, " nr_cpus: %u\n", sh->nr_cpus); if (sh->header_version >= 1) { fprintf(fp, " max_mapnr_64: %" PRIu64 "\n" " total_ram_blocks_64: %" PRIu64 "\n" " device_blocks_64: %" PRIu64 "\n" " written_blocks_64: %" PRIu64 "\n", sh->max_mapnr_64, sh->total_ram_blocks_64, sh->device_blocks_64, sh->written_blocks_64); } fprintf(fp, "\n dump sub heaer: "); if (sh->sub_hdr_size > 0) { ulong offset = sd->sub_hdr_offset; struct sadump_apic_state as; struct sadump_smram_cpu_state scs, zero; uint32_t size; uint aid; memset(&zero, 0, sizeof(zero)); if (!read_device(&size, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read sub header size\n"); return FALSE; } fprintf(fp, "\n size: %u\n", size); for (aid = 0; aid < sh->nr_cpus; ++aid) { if (!read_device(&as, sizeof(as), &offset)) { error(INFO, "sadump: cannot read sub header " "apic_id\n"); return FALSE; } fprintf(fp, " " "apic_id[%u]: ApicId %llu: Ldr: %llu\n", aid, 
(ulonglong)as.ApicId, (ulonglong)as.Ldr); } for (aid = 0; aid < sh->nr_cpus; ++aid) { if (!read_device(&scs, sizeof(scs), &offset)) { error(INFO, "sadump: cannot read sub header " "cpu_state\n"); return FALSE; } if (memcmp(&scs, &zero, sizeof(scs)) != 0) { fprintf(fp, "\n"); display_smram_cpu_state(aid, &scs); } } } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n disk set header: %lx ", (ulong)sd->diskset_header); if ((sdh = sd->diskset_header)) { fprintf(fp, "\ndisk_set_header_size: %u\n", sdh->disk_set_header_size); fprintf(fp, " disk_num: %u\n", sdh->disk_num); fprintf(fp, " disk_set_size: %llu\n", (ulonglong)sdh->disk_set_size); for (i = 0; i < sdh->disk_num - 1; ++i) { struct sadump_volume_info *vol = &sdh->vol_info[i]; fprintf(fp, " vol_info[%d]: \n", i); fprintf(fp, " id: %s\n", guid_to_str(&vol->id, guid, sizeof(guid))); fprintf(fp, " vol_size: %llu\n", (ulonglong)vol->vol_size); fprintf(fp, " status: %u\n", vol->status); fprintf(fp, " cache_size: %u\n", vol->cache_size); } } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n media header: %lx ", (ulong)sd->media_header); if ((smh = sd->media_header)) { fprintf(fp, "\n sadump_id: %s\n", guid_to_str(&smh->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&smh->disk_set_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&smh->time_stamp)))); fprintf(fp, " sequential_num: %d\n", smh->sequential_num); fprintf(fp, " term_cord: %d\n", smh->term_cord); fprintf(fp, "disk_set_header_size: %d\n", smh->disk_set_header_size); fprintf(fp, " disks_in_use: %d\n", smh->disks_in_use); fprintf(fp, " reserve: (not displayed) \n"); } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n bitmap: %lx\n", (ulong)sd->bitmap); fprintf(fp, " dumpable_bitmap: %lx\n", (ulong)sd->dumpable_bitmap); fprintf(fp, " sub_hdr_offset: %lx\n", (ulong)sd->sub_hdr_offset); fprintf(fp, "smram_cpu_state_size: %lx\n", (ulong)sd->smram_cpu_state_size); fprintf(fp, " data_offset: %lx\n", 
sd->data_offset); fprintf(fp, " block_size: %d\n", sd->block_size); fprintf(fp, " block_shift: %d\n", sd->block_shift); fprintf(fp, " page_buf: %lx\n", (ulong)sd->page_buf); fprintf(fp, " block_table: %lx\n", (ulong)sd->block_table); fprintf(fp, " sd_list_len: %d\n", sd->sd_list_len); fprintf(fp, " sd_list: %lx\n", (ulong)sd->sd_list); fprintf(fp, " backup_src_start: %llx\n", sd->backup_src_start); fprintf(fp, " backup_src_size: %lx\n", sd->backup_src_size); fprintf(fp, " backup_offset: %llx\n", (ulonglong)sd->backup_src_size); for (i = 0; i < sd->sd_list_len; ++i) { struct sadump_diskset_data *sdd = sd->sd_list[i]; fprintf(fp, "\n sd_list[%d]: \n", i); fprintf(fp, " filename: %s\n", sdd->filename); fprintf(fp, " dfd: %d\n", sdd->dfd); fprintf(fp, " header: %lx\n", (ulong)sdd->header); sph = sdd->header; fprintf(fp, " signature1: %x\n", sph->signature1); fprintf(fp, " signature2: %x\n", sph->signature2); fprintf(fp, " enable: %u\n", sph->enable); fprintf(fp, " reboot: %u\n", sph->reboot); fprintf(fp, " compress: %u\n", sph->compress); fprintf(fp, " recycle: %u\n", sph->recycle); fprintf(fp, " label: (unused)\n"); fprintf(fp, " sadump_id: %s\n", guid_to_str(&sph->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&sph->disk_set_id, guid, sizeof(guid))); fprintf(fp, " vol_id: %s\n", guid_to_str(&sph->vol_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sph->time_stamp)))); fprintf(fp, " set_disk_set: %u\n", sph->set_disk_set); fprintf(fp, " reserve: %u\n", sph->reserve); fprintf(fp, " used_device: %llu\n", (ulonglong)sph->used_device); fprintf(fp, " magicnum: %s\n", verify_magic_number(sph->magicnum) ? 
"(valid)" : "(invalid)"); fprintf(fp, " data_offset: %lx\n", sdd->data_offset); } return TRUE; } static ulong per_cpu_ptr(ulong ptr, int cpu) { if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (kt->cpus == 1) return ptr; if (!(kt->flags & PER_CPU_OFF)) return 0UL; if (machine_type("X86_64")) { ulong __per_cpu_load; readmem(symbol_value("__per_cpu_load"), KVADDR, &__per_cpu_load, sizeof(__per_cpu_load), "__per_cpu_load", FAULT_ON_ERROR); if (kt->__per_cpu_offset[cpu] == __per_cpu_load) return 0UL; } else if (machine_type("X86")) { if (kt->__per_cpu_offset[cpu] == 0) return 0UL; } return ptr + kt->__per_cpu_offset[cpu]; } static ulong early_per_cpu_ptr(char *symbol, struct syment *sym, int cpu) { char sym_early_ptr[BUFSIZE], sym_early_map[BUFSIZE]; ulong early_ptr; if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (!sym && !(sym = per_cpu_symbol_search(symbol))) return 0UL; if (!(kt->flags & SMP)) return per_cpu_ptr(sym->value, cpu); snprintf(sym_early_ptr, BUFSIZE, "%s_early_ptr", symbol); snprintf(sym_early_map, BUFSIZE, "%s_early_map", symbol); if (!symbol_exists(sym_early_ptr) || !symbol_exists(sym_early_map)) return 0UL; readmem(symbol_value(sym_early_ptr), KVADDR, &early_ptr, sizeof(early_ptr), sym_early_ptr, FAULT_ON_ERROR); return early_ptr ? symbol_value(sym_early_map)+cpu*sizeof(uint16_t) : per_cpu_ptr(sym->value, cpu); } static ulong legacy_per_cpu_ptr(ulong ptr, int cpu) { ulong addr; if (!(kt->flags & SMP)) return ptr; if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (!readmem(~ptr + cpu * sizeof(ulong), KVADDR, &addr, sizeof(ulong), "search percpu_data", FAULT_ON_ERROR)) return 0UL; return addr; } /** * Retrieve eip and esp register values from crash_notes saved by * kdump at crash. If register values has not been saved yet, set 0 to * eip and esp instead. 
*/ static int get_prstatus_from_crash_notes(int cpu, char *prstatus) { ulong crash_notes, crash_notes_ptr, percpu_addr; char *prstatus_ptr, *note_buf, *zero_buf, *name; uint32_t *buf; if (cpu < 0 || kt->cpus <= cpu) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return FALSE; } if (!symbol_exists("crash_notes")) { error(INFO, "sadump: symbol crash_notes doesn't exist\n"); return FALSE; } crash_notes = symbol_value("crash_notes"); readmem(crash_notes, KVADDR, &crash_notes_ptr, sizeof(ulong), "dereference crash_notes", FAULT_ON_ERROR); if (!crash_notes_ptr) { if (CRASHDEBUG(1)) error(INFO, "sadump: buffer for crash_notes is NULL\n"); return FALSE; } percpu_addr = VALID_STRUCT(percpu_data) ? legacy_per_cpu_ptr(crash_notes_ptr, cpu) : per_cpu_ptr(crash_notes_ptr, cpu); zero_buf = GETBUF(SIZE(note_buf)); BZERO(zero_buf, SIZE(note_buf)); note_buf = GETBUF(SIZE(note_buf)); readmem(percpu_addr, KVADDR, note_buf, SIZE(note_buf), "read crash_notes", FAULT_ON_ERROR); if (memcmp(note_buf, zero_buf, SIZE(note_buf)) == 0) return FALSE; if (BITS64()) { Elf64_Nhdr *note64; note64 = (Elf64_Nhdr *)note_buf; buf = (uint32_t *)note_buf; name = (char *)(note64 + 1); if (note64->n_type != NT_PRSTATUS || note64->n_namesz != strlen("CORE") + 1 || strncmp(name, "CORE", note64->n_namesz) || note64->n_descsz != SIZE(elf_prstatus)) return FALSE; prstatus_ptr = (char *)(buf + (sizeof(*note64) + 3) / 4 + (note64->n_namesz + 3) / 4); } else { Elf32_Nhdr *note32; note32 = (Elf32_Nhdr *)note_buf; buf = (uint32_t *)note_buf; name = (char *)(note32 + 1); if ((note32->n_type != NT_PRSTATUS) && (note32->n_namesz != strlen("CORE") + 1 || strncmp(name, "CORE", note32->n_namesz) || note32->n_descsz != SIZE(elf_prstatus))) return FALSE; prstatus_ptr = (char *)(buf + (sizeof(*note32) + 3) / 4 + (note32->n_namesz + 3) / 4); } memcpy(prstatus, prstatus_ptr, SIZE(elf_prstatus)); return TRUE; } int sadump_get_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *smram) { ulong offset; if 
(!sd->sub_hdr_offset || !sd->smram_cpu_state_size || apicid >= sd->dump_header->nr_cpus) return FALSE; offset = sd->sub_hdr_offset + sizeof(uint32_t) + sd->dump_header->nr_cpus * sizeof(struct sadump_apic_state); if (lseek(sd->dfd, offset + apicid * sd->smram_cpu_state_size, SEEK_SET) == failed) error(FATAL, "sadump: cannot lseek smram cpu state in dump sub header\n"); if (read(sd->dfd, smram, sd->smram_cpu_state_size) != sd->smram_cpu_state_size) error(FATAL, "sadump: cannot read smram cpu state in dump sub " "header\n"); return TRUE; } static void display_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *s) { fprintf(fp, "APIC ID: %d\n" " RIP: %016llx RSP: %08x%08x RBP: %08x%08x\n" " RAX: %08x%08x RBX: %08x%08x RCX: %08x%08x\n" " RDX: %08x%08x RSI: %08x%08x RDI: %08x%08x\n" " R08: %08x%08x R09: %08x%08x R10: %08x%08x\n" " R11: %08x%08x R12: %08x%08x R13: %08x%08x\n" " R14: %08x%08x R15: %08x%08x\n" " SMM REV: %08x SMM BASE %08x\n" " CS : %08x DS: %08x SS: %08x ES: %08x FS: %08x\n" " GS : %08x\n" " CR0: %016llx CR3: %016llx CR4: %08x\n" " GDT: %08x%08x LDT: %08x%08x IDT: %08x%08x\n" " GDTlim: %08x LDTlim: %08x IDTlim: %08x\n" " LDTR: %08x TR: %08x RFLAGS: %016llx\n" " EPTP: %016llx EPTP_SETTING: %08x\n" " DR6: %016llx DR7: %016llx\n" " Ia32Efer: %016llx\n" " IoMemAddr: %08x%08x IoEip: %016llx\n" " IoMisc: %08x LdtInfo: %08x\n" " IoInstructionRestart: %04x AutoHaltRestart: %04x\n", apicid, (ulonglong)s->Rip, s->RspUpper, s->RspLower, s->RbpUpper, s->RbpLower, s->RaxUpper, s->RaxLower, s->RbxUpper, s->RbxLower, s->RcxUpper, s->RcxLower, s->RdxUpper, s->RdxLower, s->RsiUpper, s->RsiLower, s->RdiUpper, s->RdiLower, s->R8Upper, s->R8Lower, s->R9Upper, s->R9Lower, s->R10Upper, s->R10Lower, s->R11Upper, s->R11Lower, s->R12Upper, s->R12Lower, s->R13Upper, s->R13Lower, s->R14Upper, s->R14Lower, s->R15Upper, s->R15Lower, s->SmmRevisionId, s->Smbase, s->Cs, s->Ds, s->Ss, s->Es, s->Fs, s->Gs, (ulonglong)s->Cr0, (ulonglong)s->Cr3, s->Cr4, s->GdtUpper, s->GdtLower, 
s->LdtUpper, s->LdtLower, s->IdtUpper, s->IdtLower, s->GdtLimit, s->LdtLimit, s->IdtLimit, s->Ldtr, s->Tr, (ulonglong)s->Rflags, (ulonglong)s->Eptp, s->EptpSetting, (ulonglong)s->Dr6, (ulonglong)s->Dr7, (ulonglong)s->Ia32Efer, s->IoMemAddrUpper, s->IoMemAddrLower, (ulonglong)s->IoEip, s->IoMisc, s->LdtInfo, s->IoInstructionRestart, s->AutoHaltRestart); } static int cpu_to_apicid(int cpu, int *apicid) { struct syment *sym; if (symbol_exists("bios_cpu_apicid")) { uint8_t apicid_u8; readmem(symbol_value("bios_cpu_apicid") + cpu*sizeof(uint8_t), KVADDR, &apicid_u8, sizeof(uint8_t), "bios_cpu_apicid", FAULT_ON_ERROR); *apicid = (int)apicid_u8; if (CRASHDEBUG(1)) error(INFO, "sadump: apicid %u for cpu %d from " "bios_cpu_apicid\n", apicid_u8, cpu); } else if ((sym = per_cpu_symbol_search("x86_bios_cpu_apicid"))) { uint16_t apicid_u16; readmem(early_per_cpu_ptr("x86_bios_cpu_apicid", sym, cpu), KVADDR, &apicid_u16, sizeof(uint16_t), "x86_bios_cpu_apicid", FAULT_ON_ERROR); *apicid = (int)apicid_u16; if (CRASHDEBUG(1)) error(INFO, "sadump: apicid %u for cpu %d from " "x86_bios_cpu_apicid\n", apicid_u16, cpu); } else { if (CRASHDEBUG(1)) error(INFO, "sadump: no symbols for access to apicid\n"); return FALSE; } return TRUE; } static int get_sadump_smram_cpu_state(int cpu, struct sadump_smram_cpu_state *smram) { int apicid = 0; if (cpu < 0 || kt->cpus <= cpu) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return FALSE; } if (!cpu_to_apicid(cpu, &apicid)) return FALSE; sadump_get_smram_cpu_state(apicid, smram); return TRUE; } void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp) { ulong ip, sp; struct sadump_smram_cpu_state smram; char *prstatus; int cpu = bt->tc->processor; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, ipp, spp); return; } bt->flags |= BT_DUMPFILE_SEARCH; if (machine_type("X86_64")) machdep->get_stack_frame(bt, ipp, spp); else if (machine_type("X86")) get_netdump_regs_x86(bt, ipp, spp); if (bt->flags & BT_DUMPFILE_SEARCH) 
return; prstatus = GETBUF(SIZE(elf_prstatus)); if (get_prstatus_from_crash_notes(cpu, prstatus)) { ip = ULONG(prstatus + OFFSET(elf_prstatus_pr_reg) + (BITS64() ? OFFSET(user_regs_struct_rip) : OFFSET(user_regs_struct_eip))); sp = ULONG(prstatus + OFFSET(elf_prstatus_pr_reg) + (BITS64() ? OFFSET(user_regs_struct_rsp) : OFFSET(user_regs_struct_eip))); if (ip || sp) { *ipp = ip; *spp = sp; return; } } get_sadump_smram_cpu_state(cpu, &smram); ip = smram.Rip; sp = ((uint64_t)smram.RspUpper << 32) + smram.RspLower; if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { *ipp = ip; *spp = sp; bt->flags |= BT_KERNEL_SPACE; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) bt->flags |= BT_USER_SPACE; } void sadump_display_regs(int cpu, FILE *ofp) { struct sadump_smram_cpu_state smram; if (cpu < 0 || cpu >= kt->cpus) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return; } get_sadump_smram_cpu_state(cpu, &smram); if (machine_type("X86_64")) { fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", (ulonglong)(smram.Rip), (ulonglong)(((uint64_t)smram.RspUpper<<32)+smram.RspLower), (ulonglong)(smram.Rflags), (ulonglong)(((uint64_t)smram.RaxUpper<<32)+smram.RaxLower), (ulonglong)(((uint64_t)smram.RbxUpper<<32)+smram.RbxLower), (ulonglong)(((uint64_t)smram.RcxUpper<<32)+smram.RcxLower), (ulonglong)(((uint64_t)smram.RdxUpper<<32)+smram.RdxLower), (ulonglong)(((uint64_t)smram.RsiUpper<<32)+smram.RsiLower), (ulonglong)(((uint64_t)smram.RdiUpper<<32)+smram.RdiLower), (ulonglong)(((uint64_t)smram.RbpUpper<<32)+smram.RbpLower), (ulonglong)(((uint64_t)smram.R8Upper<<32)+smram.R8Lower), 
(ulonglong)(((uint64_t)smram.R9Upper<<32)+smram.R9Lower), (ulonglong)(((uint64_t)smram.R10Upper<<32)+smram.R10Lower), (ulonglong)(((uint64_t)smram.R11Upper<<32)+smram.R11Lower), (ulonglong)(((uint64_t)smram.R12Upper<<32)+smram.R12Lower), (ulonglong)(((uint64_t)smram.R13Upper<<32)+smram.R13Lower), (ulonglong)(((uint64_t)smram.R14Upper<<32)+smram.R14Lower), (ulonglong)(((uint64_t)smram.R15Upper<<32)+smram.R15Lower), smram.Cs, smram.Ss); } if (machine_type("X86")) { fprintf(ofp, " EAX: %08llx EBX: %08llx ECX: %08llx EDX: %08llx\n" " DS: %04x ESI: %08llx ES: %04x EDI: %08llx\n" " SS: %04x ESP: %08llx EBP: %08llx GS: %04x\n" " CS: %04x EIP: %08llx EFLAGS: %08llx\n", (ulonglong)smram.RaxLower, (ulonglong)smram.RbxLower, (ulonglong)smram.RcxLower, (ulonglong)smram.RdxLower, smram.Ds & 0xffff, (ulonglong)smram.RsiLower, smram.Es & 0xffff, (ulonglong)smram.RdiLower, smram.Ss, (ulonglong)smram.RspLower, (ulonglong)smram.RbpLower, smram.Gs, smram.Cs, (ulonglong)smram.Rip, (ulonglong)smram.Rflags); } } /* * sadump does not save phys_base; it must resort to another way. */ int sadump_phys_base(ulong *phys_base) { if (SADUMP_VALID()) { if (CRASHDEBUG(1)) error(NOTE, "sadump: does not save phys_base.\n"); return FALSE; } return FALSE; } /* * Used by "sys" command to show diskset disk names. */ void sadump_show_diskset(void) { int i; for (i = 0; i < sd->sd_list_len; ++i) { char *filename = sd->sd_list[i]->filename; fprintf(fp, "%s%s", i ? 
" " : "", filename); if ((i+1) < sd->sd_list_len) fprintf(fp, "\n"); } } static int block_table_init(void) { uint64_t pfn, section, max_section, *block_table; max_section = divideup(sd->max_mapnr, SADUMP_PF_SECTION_NUM); block_table = calloc(sizeof(uint64_t), max_section); if (!block_table) { error(INFO, "sadump: cannot allocate memory for block_table\n"); return FALSE; } for (section = 0; section < max_section; ++section) { if (section > 0) block_table[section] = block_table[section-1]; for (pfn = section * SADUMP_PF_SECTION_NUM; pfn < (section + 1) * SADUMP_PF_SECTION_NUM; ++pfn) if (page_is_dumpable(pfn)) block_table[section]++; } sd->block_table = block_table; return TRUE; } static uint64_t pfn_to_block(uint64_t pfn) { uint64_t block, section, p; section = pfn / SADUMP_PF_SECTION_NUM; if (section) block = sd->block_table[section - 1]; else block = 0; for (p = section * SADUMP_PF_SECTION_NUM; p < pfn; ++p) if (page_is_dumpable(p)) block++; return block; } int sadump_is_zero_excluded(void) { return (sd->flags & SADUMP_ZERO_EXCLUDED) ? TRUE : FALSE; } void sadump_set_zero_excluded(void) { sd->flags |= SADUMP_ZERO_EXCLUDED; } void sadump_unset_zero_excluded(void) { sd->flags &= ~SADUMP_ZERO_EXCLUDED; } struct sadump_data * get_sadump_data(void) { return sd; } crash-7.1.4/s390dbf.c0000664000000000000000000010341612634305150012645 0ustar rootroot/* * s390 debug feature command for crash * * Copyright (C) IBM Corp. 2006 * Author(s): Michael Holzheu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #if defined(S390) || defined(S390X) #include "defs.h" #include #include /* * Compat layer to integrate lcrash commands into crash * Maps lcrash API to crash functions */ #define KL_NBPW sizeof(long) #define KL_ERRORFP stderr #define MAX_ARGS 128 #define MAX_CMDLINE 256 #define C_FALSE 0x00000001 /* Command takes no arguments */ #define C_TRUE 0x00000002 /* Command requires arguments */ #define C_ALL 0x00000004 /* All elements */ #define C_PERM 0x00000008 /* Allocate perminant blocks */ #define C_TEMP 0x00000000 /* For completeness */ #define C_FULL 0x00000010 /* Full output */ #define C_LIST 0x00000020 /* List items */ #define C_NEXT 0x00000040 /* Follow links */ #define C_WRITE 0x00000080 /* Write output to file */ #define C_NO_OPCHECK 0x00000100 /* Don't reject bad cmd line options */ #define C_ITER 0x00000200 /* set iteration threshold */ #define C_LFLG_SHFT 12 #define KL_ARCH_S390 0 #define KL_ARCH_S390X 1 #ifdef __s390x__ #define KL_ARCH KL_ARCH_S390X #define FMTPTR "l" #define KL_PTRSZ 8 #else #define KL_ARCH KL_ARCH_S390 #define FMTPTR "ll" #define KL_PTRSZ 4 #endif typedef unsigned long uaddr_t; typedef unsigned long kaddr_t; typedef struct _syment { char *s_name; kaddr_t s_addr; } syment_t; typedef struct option_s { struct option_s *op_next; char op_char; char *op_arg; } option_t; typedef struct command_s { int flags; char cmdstr[MAX_CMDLINE]; char *command; char *cmdline; option_t *options; int nargs; char *args[MAX_ARGS]; char *pipe_cmd; FILE *ofp; FILE *efp; } command_t; static inline syment_t* kl_lkup_symaddr(kaddr_t addr) { static syment_t sym; struct syment *crash_sym; crash_sym = value_search(addr, &sym.s_addr); if (!crash_sym) return NULL; sym.s_name = crash_sym->name; return &sym; } static inline syment_t* kl_lkup_symname(char* name) { static syment_t sym; sym.s_addr = symbol_value(name); sym.s_name = NULL; if(!sym.s_addr) return NULL; else return &sym; } static inline void GET_BLOCK(kaddr_t addr, int size, void* ptr) { readmem(addr, 
KVADDR,ptr,size,"GET_BLOCK",FAULT_ON_ERROR); } static inline kaddr_t KL_VREAD_PTR(kaddr_t addr) { unsigned long ptr; readmem(addr, KVADDR,&ptr,sizeof(ptr),"GET_BLOCK",FAULT_ON_ERROR); return (kaddr_t)ptr; } static inline uint32_t KL_GET_UINT32(void* ptr) { return *((uint32_t*)ptr); } static inline uint64_t KL_GET_UINT64(void* ptr) { return *((uint64_t*)ptr); } static inline kaddr_t KL_GET_PTR(void* ptr) { return *((kaddr_t*)ptr); } static inline void* K_PTR(void* addr, char* struct_name, char* member_name) { return addr+MEMBER_OFFSET(struct_name,member_name); } static inline unsigned long KL_ULONG(void* ptr, char* struct_name, char* member_name) { return ULONG(ptr+MEMBER_OFFSET(struct_name,member_name)); } static inline uint32_t KL_VREAD_UINT32(kaddr_t addr) { uint32_t rc; readmem(addr, KVADDR,&rc,sizeof(rc),"KL_VREAD_UINT32",FAULT_ON_ERROR); return rc; } static inline uint32_t KL_INT(void* ptr, char* struct_name, char* member_name) { return UINT(ptr+MEMBER_OFFSET(struct_name,member_name)); } static inline int set_cmd_flags(command_t *cmd, int flags, char *extraops) { return 0; } static inline void kl_s390tod_to_timeval(uint64_t todval, struct timeval *xtime) { todval -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); todval >>= 12; xtime->tv_sec = todval / 1000000; xtime->tv_usec = todval % 1000000; } static inline int kl_struct_len(char* struct_name) { return STRUCT_SIZE(struct_name); } static inline kaddr_t kl_funcaddr(kaddr_t addr) { struct syment *crash_sym; crash_sym = value_search(addr, &addr); if (!crash_sym) return -1; else return crash_sym->value; } #define CMD_USAGE(cmd, s) \ fprintf(cmd->ofp, "Usage: %s %s\n", cmd->command, s); \ fprintf(cmd->ofp, "Enter \"help %s\" for details.\n",cmd->command); /* * s390 debug feature implementation */ #ifdef DBF_DYNAMIC_VIEWS /* views defined in shared libs */ #include #endif /* Local flags */ #define LOAD_FLAG (1 << C_LFLG_SHFT) #define VIEWS_FLAG (2 << C_LFLG_SHFT) #define SAVE_DBF_FLAG (4 << C_LFLG_SHFT) 
#ifndef MIN #define MIN(a,b) (((a)<(b))?(a):(b)) #endif /* Stuff which has to match with include/asm-s390/debug.h */ #define DBF_VERSION_V1 1 #define DBF_VERSION_V2 2 #define PAGE_SIZE 4096 #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ #define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ #define DEBUG_SPRINTF_MAX_ARGS 10 /* define debug-structures for lcrash */ #define DEBUG_DATA(entry) (char*)(entry + 1) typedef struct debug_view_s debug_view_t; /* struct to hold contents of struct __debug_entry from dump */ typedef struct debug_entry_s{ union { struct { unsigned long long clock:52; unsigned long long exception:1; unsigned long long level:3; unsigned long long cpuid:8; } fields; unsigned long long stck; } id; kaddr_t caller; /* changed from void* to kaddr_t */ } __attribute__((packed)) debug_entry_t; /* typedef struct __debug_entry debug_entry_t; */ static unsigned int dbf_version; /* struct is used to manage contents of structs debug_info from dump * in lcrash */ typedef struct debug_info_s { struct debug_info_s *next; struct debug_info_s *prev; kaddr_t next_dbi; /* store next ptr of struct in dump */ kaddr_t prev_dbi; /* store prev ptr of struct in dump */ int level; int nr_areas; int page_order; int buf_size; int entry_size; void **areas; /* contents of debug areas from dump */ int active_area; int *active_entry; /* change to uint32_t ? 
*/ debug_view_t *views[DEBUG_MAX_VIEWS]; char name[DEBUG_MAX_PROCF_LEN]; kaddr_t addr; int pages_per_area_v2; void ***areas_v2; } debug_info_t; /* functions to generate dbf output */ typedef int (debug_header_proc_t) (debug_info_t* id, debug_view_t* view, int area, debug_entry_t* entry, char* out_buf); typedef int (debug_format_proc_t) (debug_info_t* id, debug_view_t* view, char* out_buf, const char* in_buf); typedef int (debug_prolog_proc_t) (debug_info_t* id, debug_view_t* view, char* out_buf); struct debug_view_s { char name[DEBUG_MAX_PROCF_LEN]; debug_prolog_proc_t* prolog_proc; debug_header_proc_t* header_proc; debug_format_proc_t* format_proc; void* private_data; }; #define LCRASH_DB_VIEWS 1000 static debug_info_t *debug_area_first = NULL; static debug_info_t *debug_area_last = NULL; static debug_view_t *debug_views[LCRASH_DB_VIEWS]; static int initialized = 0; static iconv_t ebcdic_ascii_conv = 0; void s390dbf_usage(command_t * cmd); static int add_lcrash_debug_view(debug_view_t *); static int dbe_size = 0; static void EBCASC(char *inout, size_t len) { iconv(ebcdic_ascii_conv, &inout, &len, &inout, &len); } /* * prints header for debug entry */ static int dflt_header_fn(debug_info_t * id, debug_view_t *view, int area, debug_entry_t * entry, char *out_buf) { struct timeval time_val; unsigned long long time; char *except_str; kaddr_t caller; int rc = 0; char *caller_name; int offset; char caller_buf[30]; unsigned int level; syment_t *caller_sym; debug_entry_t lentry; /* store byte swapped values of entry */ lentry.id.stck = KL_GET_UINT64(&entry->id); lentry.caller = KL_GET_PTR(&entry->caller); level = lentry.id.fields.level; time = lentry.id.stck; kl_s390tod_to_timeval(time, &time_val); if (lentry.id.fields.exception) except_str = "*"; else except_str = "-"; caller = lentry.caller; if(KL_ARCH == KL_ARCH_S390){ caller &= 0x7fffffff; } caller_sym = kl_lkup_symaddr(caller); if(caller_sym){ caller_name = caller_sym->s_name; offset = caller - kl_funcaddr(caller); } 
else { sprintf(caller_buf, "%llx", (unsigned long long)caller); caller_name = caller_buf; offset = 0; } if(KL_ARCH == KL_ARCH_S390X){ rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i <%20s+%04i> ", area, time_val.tv_sec, time_val.tv_usec, level, except_str, entry->id.fields.cpuid, caller_name, offset); } else { rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i <%-20s+%04i> ", area, time_val.tv_sec, time_val.tv_usec, level, except_str, lentry.id.fields.cpuid, caller_name, offset); } return rc; } /* * prints debug header in raw format */ static int raw_header_fn(debug_info_t * id, debug_view_t *view, int area, debug_entry_t * entry, char *out_buf) { int rc; rc = sizeof(debug_entry_t); if (out_buf == NULL) goto out; memcpy(out_buf,entry,sizeof(debug_entry_t)); out: return rc; } /* * prints debug data in raw format */ static int raw_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int rc; rc = id->buf_size; if (out_buf == NULL || in_buf == NULL) goto out; memcpy(out_buf, in_buf, id->buf_size); out: return rc; } /* * prints debug data in hex/ascii format */ static int hex_ascii_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int i, rc = 0; if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size * 4 + 3; goto out; } for (i = 0; i < id->buf_size; i++) { rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]); } rc += sprintf(out_buf + rc, "| "); for (i = 0; i < id->buf_size; i++) { unsigned char c = in_buf[i]; if (isascii(c) && isprint(c)) rc += sprintf(out_buf + rc, "%c", c); else rc += sprintf(out_buf + rc, "."); } rc += sprintf(out_buf + rc, "\n"); out: return rc; } /* * prints debug data in sprintf format */ static int sprintf_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { #define _BUFSIZE 1024 char buf[_BUFSIZE]; int i, k, rc = 0, num_longs = 0, num_strings = 0; int num_used_args ATTRIBUTE_UNUSED; /* use kaddr_t to store long 
values of 32bit and 64bit archs here */ kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS]; /* store ptrs to strings to be deallocated at end of this function */ uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS]; kaddr_t addr; memset(buf, 0, sizeof(buf)); memset(inbuf_cpy, 0, sizeof(inbuf_cpy)); memset(to_dealloc, 0, sizeof(to_dealloc)); if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size * 4 + 3; goto out; } /* get the format string into buf */ addr = KL_GET_PTR((void*)in_buf); GET_BLOCK(addr, _BUFSIZE, buf); k = 0; for (i = 0; buf[i] && (buf[i] != '\n'); i++) { if (buf[i] != '%') continue; if (k == DEBUG_SPRINTF_MAX_ARGS) { fprintf(KL_ERRORFP, "\nToo much parameters in sprinf view (%i)\n" ,k + 1); fprintf(KL_ERRORFP, "Format String: %s)\n", buf); break; } /* for sprintf we have only unsigned long values ... */ if (buf[i+1] != 's'){ /* we use KL_GET_PTR here to read ulong value */ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); inbuf_cpy[k] = addr; } else { /* ... or ptrs to strings in debug areas */ inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE); to_dealloc[num_strings++] = inbuf_cpy[k]; addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); GET_BLOCK(addr, _BUFSIZE, (void*)(uaddr_t)(inbuf_cpy[k])); } k++; } /* count of longs fit into one entry */ num_longs = id->buf_size / KL_NBPW; /* sizeof(long); */ if(num_longs < 1) /* bufsize of entry too small */ goto out; if(num_longs == 1) { /* no args, just print the format string */ rc = sprintf(out_buf + rc, "%s", buf); goto out; } /* number of arguments used for sprintf (without the format string) */ num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1)); rc = sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]), (uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]), (uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]), (uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]), (uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]), (uaddr_t)(inbuf_cpy[9])); out: while (num_strings--){ free((char*)(to_dealloc[num_strings])); } return rc; } 
/***********************************
 * functions for debug-views
 ***********************************/

/*
 * prints out actual debug level
 * (out_buf == NULL is a size query; "%i\n" needs at most 2 + digits)
 */
static int
prolog_level_fn(debug_info_t * id, debug_view_t *view, char *out_buf)
{
	int rc = 0;

	if (out_buf == NULL) {
		rc = 2;
		goto out;
	}
	rc = sprintf(out_buf, "%i\n", id->level);
out:
	return rc;
}

/*
 * prints out actual pages_per_area
 */
static int
prolog_pages_fn(debug_info_t * id, debug_view_t *view, char *out_buf)
{
	int rc = 0;

	if (out_buf == NULL) {
		rc = 2;
		goto out;
	}
	rc = sprintf(out_buf, "%i\n", id->pages_per_area_v2);
out:
	return rc;
}

/*
 * prints out prolog (column header line for the default entry header)
 */
static int
prolog_fn(debug_info_t * id, debug_view_t *view, char *out_buf)
{
	int rc = 0;

	rc = sprintf(out_buf, "AREA TIME LEVEL EXCEPTION CP CALLING FUNCTION"
		" + OFFSET DATA\n==================================="
		"=======================================\n");
	return rc;
}

/*
 * prints debug data in hex format
 * NOTE(review): unlike the sibling format functions there is no
 * out_buf/in_buf NULL size-probe guard here -- confirm this view is
 * never probed with NULL buffers.
 */
static int
hex_format_fn(debug_info_t * id, debug_view_t *view,
	char *out_buf, const char *in_buf)
{
	int i, rc = 0;

	for (i = 0; i < id->buf_size; i++) {
		rc += sprintf(out_buf + rc, "%02x ",
			((unsigned char *) in_buf)[i]);
	}
	rc += sprintf(out_buf + rc, "\n");
	return rc;
}

/*
 * prints debug data in ascii format (non-printable bytes become '.')
 */
static int
ascii_format_fn(debug_info_t * id, debug_view_t *view,
	char *out_buf, const char *in_buf)
{
	int i, rc = 0;

	if (out_buf == NULL || in_buf == NULL) {
		rc = id->buf_size + 1;
		goto out;
	}
	for (i = 0; i < id->buf_size; i++) {
		unsigned char c = in_buf[i];

		if (!isprint(c))
			rc += sprintf(out_buf + rc, ".");
		else
			rc += sprintf(out_buf + rc, "%c", c);
	}
	rc += sprintf(out_buf + rc, "\n");
out:
	return rc;
}

/*
 * prints debug data in ebcdic format (converted byte-wise via EBCASC)
 */
static int
ebcdic_format_fn(debug_info_t * id, debug_view_t *view,
	char *out_buf, const char *in_buf)
{
	int i, rc = 0;

	if (out_buf == NULL || in_buf == NULL) {
		rc = id->buf_size + 1;
		goto out;
	}
	for (i = 0; i < id->buf_size; i++) {
		char c = in_buf[i];

		EBCASC(&c, 1);
		if (!isprint(c))
			rc += sprintf(out_buf + rc, ".");
		else
			rc += sprintf(out_buf + rc, "%c", c);
	}
	rc += sprintf(out_buf + rc, "\n");
out:
	return rc;
}

/* Static view instances: { name, prolog, header, format } */
debug_view_t ascii_view = {
	"ascii",
	&prolog_fn,
	&dflt_header_fn,
	&ascii_format_fn,
};

debug_view_t ebcdic_view = {
	"ebcdic",
	&prolog_fn,
	&dflt_header_fn,
	&ebcdic_format_fn,
};

debug_view_t hex_view = {
	"hex",
	&prolog_fn,
	&dflt_header_fn,
	&hex_format_fn,
};

debug_view_t level_view = {
	"level",
	&prolog_level_fn,
	NULL,
	NULL,
};

debug_view_t pages_view = {
	"pages",
	&prolog_pages_fn,
	NULL,
	NULL,
};

debug_view_t raw_view = {
	"raw",
	NULL,
	&raw_header_fn,
	&raw_format_fn,
};

debug_view_t hex_ascii_view = {
	"hex_ascii",
	&prolog_fn,
	&dflt_header_fn,
	&hex_ascii_format_fn,
};

debug_view_t sprintf_view = {
	"sprintf",
	&prolog_fn,
	&dflt_header_fn,
	&sprintf_format_fn,
};

/*
 * Find the entry with the smallest (oldest) TOD clock value in an area;
 * stops at the first never-written entry (stck == 0).
 */
static debug_entry_t *
debug_find_oldest_entry(debug_entry_t *entries, int num, int entry_size)
{
	debug_entry_t *result, *current;
	int i;
	uint64_t clock1, clock2;

	result = entries;
	current = entries;
	for (i=0; i < num; i++) {
		if (current->id.stck == 0)
			break;
		clock1 = current->id.fields.clock;
		clock2 = result->id.fields.clock;
		/* compare in dump byte order */
		clock1 = KL_GET_UINT64(&clock1);
		clock2 = KL_GET_UINT64(&clock2);
		if (clock1 < clock2)
			result = current;
		current = (debug_entry_t *) ((char *) current + entry_size);
	}
	return result;
}

/*
 * debug_format_output:
 * - calls prolog, header and format functions of view to format output
 * (v1 layout: each area is one contiguous block of PAGE_SIZE<<page_order;
 * traversal starts at the oldest entry and wraps at last_entry)
 */
static int
debug_format_output_v1(debug_info_t * debug_area, debug_view_t *view,
	FILE * ofp)
{
	int i, j, len;
	int nr_of_entries;
	debug_entry_t *act_entry, *last_entry;
	char *act_entry_data;
	char buf[2048];
	size_t items ATTRIBUTE_UNUSED;

	/* print prolog */
	if (view->prolog_proc) {
		len = view->prolog_proc(debug_area, view, buf);
		items = fwrite(buf,len, 1, ofp);
		memset(buf, 0, 2048);
	}
	/* print debug records */
	if (!(view->format_proc) && !(view->header_proc))
		goto out;
	if(debug_area->entry_size <= 0){
		fprintf(ofp, "Invalid entry_size: %i\n",
			debug_area->entry_size);
		goto out;
	}
	nr_of_entries = (PAGE_SIZE << debug_area->page_order)
		/ debug_area->entry_size;
	for (i = 0; i < debug_area->nr_areas; i++) {
		act_entry = debug_find_oldest_entry(debug_area->areas[i],
			nr_of_entries, debug_area->entry_size);
		last_entry = (debug_entry_t *) ((char *) debug_area->areas[i] +
			(PAGE_SIZE << debug_area->page_order) -
			debug_area->entry_size);
		for (j = 0; j < nr_of_entries; j++) {
			/* payload follows the fixed __debug_entry header */
			act_entry_data = (char*)act_entry + dbe_size;
			if (act_entry->id.stck == 0)
				break;	/* empty entry */
			if (view->header_proc) {
				len = view->header_proc(debug_area, view, i,
					act_entry, buf);
				items = fwrite(buf,len, 1, ofp);
				memset(buf, 0, 2048);
			}
			if (view->format_proc) {
				len = view->format_proc(debug_area, view,
					buf, act_entry_data);
				items = fwrite(buf,len, 1, ofp);
				memset(buf, 0, 2048);
			}
			act_entry = (debug_entry_t *) (((char *) act_entry) +
				debug_area->entry_size);
			if (act_entry > last_entry)
				act_entry = debug_area->areas[i];	/* wrap */
		}
	}
out:
	return 1;
}

/*
 * debug_format_output_v2:
 * - calls prolog, header and format functions of view to format output
 * (v2 layout: each area is an array of single pages)
 */
static int
debug_format_output_v2(debug_info_t * debug_area, debug_view_t *view,
	FILE * ofp)
{
	int i, j, k, len;
	debug_entry_t *act_entry;
	char *act_entry_data;
	char buf[2048];
	size_t items ATTRIBUTE_UNUSED;

	/* print prolog */
	if (view->prolog_proc) {
		len = view->prolog_proc(debug_area, view, buf);
		items = fwrite(buf,len, 1, ofp);
		memset(buf, 0, 2048);
	}
	/* print debug records */
	if (!(view->format_proc) && !(view->header_proc))
		goto out;
	if(debug_area->entry_size <= 0){
		fprintf(ofp, "Invalid entry_size: %i\n",
			debug_area->entry_size);
		goto out;
	}
	for (i = 0; i < debug_area->nr_areas; i++) {
		int nr_entries_per_page = PAGE_SIZE/debug_area->entry_size;
		for (j = 0; j < debug_area->pages_per_area_v2; j++) {
			act_entry = debug_area->areas_v2[i][j];
			for (k = 0; k < nr_entries_per_page; k++) {
				act_entry_data = (char*)act_entry + dbe_size;
				if (act_entry->id.stck == 0)
					break;	/* empty entry */
				if (view->header_proc) {
					len = view->header_proc(debug_area,
						view, i, act_entry, buf);
					items = fwrite(buf,len, 1, ofp);
					memset(buf, 0, 2048);
				}
				if (view->format_proc) {
					len = view->format_proc(debug_area,
						view, buf, act_entry_data);
					items = fwrite(buf,len, 1, ofp);
					memset(buf, 0, 2048);
				}
				act_entry = (debug_entry_t *) (((char *)
					act_entry) + debug_area->entry_size);
			}
		}
	}
out:
	return 1;
}

/*
 * Look up a buffered debug area by name in the global list.
 */
static debug_info_t *
find_debug_area(const char *area_name)
{
	debug_info_t* act_debug_info = debug_area_first;

	while(act_debug_info != NULL){
		if (strcmp(act_debug_info->name, area_name) == 0)
			return act_debug_info;
		act_debug_info = act_debug_info->next;
	}
	return NULL;
}

/*
 * One-time registration of the built-in views and the EBCDIC converter.
 * The "pages" view only exists for DBF version 2 dumps.
 */
static void
dbf_init(void)
{
	if (!initialized) {
		if(dbf_version >= DBF_VERSION_V2)
			add_lcrash_debug_view(&pages_view);
		add_lcrash_debug_view(&ascii_view);
		add_lcrash_debug_view(&level_view);
		add_lcrash_debug_view(&ebcdic_view);
		add_lcrash_debug_view(&hex_view);
		add_lcrash_debug_view(&hex_ascii_view);
		add_lcrash_debug_view(&sprintf_view);
		add_lcrash_debug_view(&raw_view);
		ebcdic_ascii_conv = iconv_open("ISO-8859-1", "EBCDIC-US");
		initialized = 1;
	}
}

/*
 * Read a kernel struct debug_view and return a malloc'd local copy
 * holding only its name.  Caller frees via free_debug_view().
 */
static debug_view_t*
get_debug_view(kaddr_t addr)
{
	void* k_debug_view;
	int k_debug_view_size;
	debug_view_t* rc;

	rc = (debug_view_t*)malloc(sizeof(debug_view_t));
	memset(rc, 0, sizeof(debug_view_t));
	k_debug_view_size = kl_struct_len("debug_view");
	k_debug_view = malloc(k_debug_view_size);
	GET_BLOCK(addr, k_debug_view_size, k_debug_view);
	/* NOTE(review): strncpy does not guarantee NUL termination if the
	 * kernel name fills DEBUG_MAX_PROCF_LEN -- confirm */
	strncpy(rc->name,K_PTR(k_debug_view,"debug_view","name"),
		DEBUG_MAX_PROCF_LEN);
	free(k_debug_view);
	return rc;
}

static void
free_debug_view(debug_view_t* view)
{
	if(view)
		free(view);
}

/*
 * Copy all v1 debug areas (one contiguous block per area) from the dump.
 */
static void
debug_get_areas_v1(debug_info_t* db_info, void* k_dbi)
{
	kaddr_t mem_pos;
	kaddr_t dbe_addr;
	int area_size, i;

	/* get areas */
	/* place to hold ptrs to debug areas in lcrash */
	area_size = PAGE_SIZE << db_info->page_order;
	db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *));
	memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *));
	mem_pos = KL_ULONG(k_dbi,"debug_info","areas");
	for (i = 0; i < db_info->nr_areas; i++) {
		dbe_addr = KL_VREAD_PTR(mem_pos);
		db_info->areas[i] = (debug_entry_t *) malloc(area_size);
		/* read raw data for debug area */
		GET_BLOCK(dbe_addr, area_size, db_info->areas[i]);
		mem_pos += KL_NBPW;
	}
}

/*
 * Copy all v2 debug areas (per-area arrays of single pages) from the dump.
 */
static void
debug_get_areas_v2(debug_info_t* db_info, void* k_dbi)
{
	kaddr_t area_ptr;
	kaddr_t page_array_ptr;
	kaddr_t page_ptr;
	int i,j;

	db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **));
	area_ptr = KL_ULONG(k_dbi,"debug_info","areas");
	for (i = 0; i < db_info->nr_areas; i++) {
		db_info->areas_v2[i] =
			(void**)malloc(db_info->pages_per_area_v2
				* sizeof(void*));
		page_array_ptr = KL_VREAD_PTR(area_ptr);
		for(j=0; j < db_info->pages_per_area_v2; j++) {
			page_ptr = KL_VREAD_PTR(page_array_ptr);
			db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE);
			/* read raw data for debug area */
			GET_BLOCK(page_ptr, PAGE_SIZE,
				db_info->areas_v2[i][j]);
			page_array_ptr += KL_NBPW;
		}
		area_ptr += KL_NBPW;
	}
}

/*
 * Read a kernel struct debug_info at 'addr' into a malloc'd local
 * debug_info_t.  When get_areas is nonzero the area contents are copied
 * too (version-dependent layout).  Returns NULL if struct sizes are
 * unknown.  Caller frees via free_debug_info_v1()/_v2().
 */
static debug_info_t*
get_debug_info(kaddr_t addr,int get_areas)
{
	void *k_dbi;
	kaddr_t mem_pos;
	kaddr_t view_addr;
	debug_info_t* db_info;
	int i;
	int dbi_size;

	/* get sizes of kernel structures */
	if(!(dbi_size = kl_struct_len("debug_info"))){
		fprintf (KL_ERRORFP,
			"Could not determine sizeof(struct debug_info)\n");
		return(NULL);
	}
	if(!(dbe_size = kl_struct_len("__debug_entry"))){
		fprintf(KL_ERRORFP,
			"Could not determine sizeof(struct __debug_entry)\n");
		return(NULL);
	}
	/* get kernel debug_info structure */
	k_dbi = malloc(dbi_size);
	GET_BLOCK(addr, dbi_size, k_dbi);
	db_info = (debug_info_t*)malloc(sizeof(debug_info_t));
	memset(db_info, 0, sizeof(debug_info_t));
	/* copy members */
	db_info->level = KL_INT(k_dbi,"debug_info","level");
	db_info->nr_areas = KL_INT(k_dbi,"debug_info","nr_areas");
	db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area");
	db_info->page_order = KL_INT(k_dbi,"debug_info","page_order");
	db_info->buf_size = KL_INT(k_dbi,"debug_info","buf_size");
	db_info->entry_size = KL_INT(k_dbi,"debug_info","entry_size");
	db_info->next_dbi = KL_ULONG(k_dbi,"debug_info","next");
	db_info->prev_dbi = KL_ULONG(k_dbi,"debug_info","prev");
	db_info->addr = addr;
	strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"),
		DEBUG_MAX_PROCF_LEN);
	if(get_areas){
		if(dbf_version == DBF_VERSION_V1)
			debug_get_areas_v1(db_info,k_dbi);
		else
			debug_get_areas_v2(db_info,k_dbi);
	} else {
		db_info->areas = NULL;
	}
	/* get views */
	mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views");
	memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*));
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos);
		if(view_addr == 0){
			break;
		} else {
			db_info->views[i] = get_debug_view(view_addr);
		}
		mem_pos += KL_NBPW;
	}
	free(k_dbi);
	return db_info;
}

/* Release a v1 debug_info_t copy including its areas and view names. */
static void
free_debug_info_v1(debug_info_t * db_info)
{
	int i;

	if(db_info->areas){
		for (i = 0; i < db_info->nr_areas; i++) {
			free(db_info->areas[i]);
		}
	}
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		free_debug_view(db_info->views[i]);
	}
	free(db_info->areas);
	free(db_info);
}

/* Release a v2 debug_info_t copy including its per-page areas. */
static void
free_debug_info_v2(debug_info_t * db_info)
{
	int i,j;

	if(db_info->areas) {
		for (i = 0; i < db_info->nr_areas; i++) {
			for(j = 0; j < db_info->pages_per_area_v2; j++) {
				free(db_info->areas_v2[i][j]);
			}
			free(db_info->areas[i]);
		}
		free(db_info->areas);
		db_info->areas = NULL;
	}
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		free_debug_view(db_info->views[i]);
	}
	free(db_info);
}

/*
 * Format a debug area through a view and release the area copy,
 * dispatching on the dump's DBF version.
 */
static void
debug_write_output(debug_info_t *db_info, debug_view_t *db_view, FILE * fp)
{
	if (dbf_version == DBF_VERSION_V1) {
		debug_format_output_v1(db_info, db_view, fp);
		free_debug_info_v1(db_info);
	} else {
		debug_format_output_v2(db_info, db_view, fp);
		free_debug_info_v2(db_info);
	}
}

/*
 * Walk the kernel's debug_area_first list and buffer every debug_info
 * (without area contents) into the local debug_area_first/last list.
 * Returns 0 on success, -1 if the anchor symbol is missing.
 */
static int
get_debug_areas(void)
{
	kaddr_t act_debug_area;
	syment_t *debug_sym;
	debug_info_t *act_debug_area_cpy;

	if(!(debug_sym = kl_lkup_symname("debug_area_first"))){
		printf("Did not find debug_areas");
		return -1;
	}
	act_debug_area = KL_VREAD_PTR(debug_sym->s_addr);
	while(act_debug_area != 0){
		act_debug_area_cpy =
			get_debug_info(act_debug_area,0);
		act_debug_area = act_debug_area_cpy->next_dbi;
		if(debug_area_first == NULL){
			debug_area_first = act_debug_area_cpy;
		} else {
			debug_area_last->next = act_debug_area_cpy;
		}
		debug_area_last = act_debug_area_cpy;
	}
	return 0;
}

/*
 * Release the buffered debug-area list built by get_debug_areas().
 */
static void
free_debug_areas(void)
{
	debug_info_t* next;
	debug_info_t* act_debug_info = debug_area_first;

	while(act_debug_info != NULL){
		next = act_debug_info->next;
		if(dbf_version == DBF_VERSION_V1)
			free_debug_info_v1(act_debug_info);
		else
			free_debug_info_v2(act_debug_info);
		act_debug_info = next;
	}
	debug_area_first = NULL;
	debug_area_last = NULL;
}

/*
 * Look up a registered (locally implemented) view by name.
 */
static debug_view_t *
find_lcrash_debug_view(const char *name)
{
	int i;

	for (i = 0; (i < LCRASH_DB_VIEWS) && (debug_views[i] != NULL); i++) {
		if (strcmp(debug_views[i]->name, name) == 0)
			return debug_views[i];
	}
	return NULL;
}

/*
 * List all locally registered views ('s390dbf -v').
 */
static void
print_lcrash_debug_views(FILE * ofp)
{
	int i;

	fprintf(ofp, "REGISTERED VIEWS\n");
	fprintf(ofp, "=====================\n");
	for (i = 0; i < LCRASH_DB_VIEWS; i++) {
		if (debug_views[i] == NULL) {
			return;
		}
		fprintf(ofp, " - %s\n", debug_views[i]->name);
	}
}

/*
 * Register a view; returns 0 on success, -1 if the table is full or a
 * view with the same name already exists.
 */
static int
add_lcrash_debug_view(debug_view_t *view)
{
	int i;

	for (i = 0; i < LCRASH_DB_VIEWS; i++) {
		if (debug_views[i] == NULL) {
			debug_views[i] = view;
			return 0;
		}
		if (strcmp(debug_views[i]->name, view->name) == 0)
			return -1;
	}
	return -1;
}

/*
 * Print one debug log through one view ('s390dbf <log> <view>').
 */
static int
list_one_view(char *area_name, char *view_name, command_t * cmd)
{
	debug_info_t *db_info;
	debug_view_t *db_view;

	if ((db_info = find_debug_area(area_name)) == NULL) {
		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
		return -1;
	}
	/* re-read the debug_info, this time including area contents */
	db_info = get_debug_info(db_info->addr,1);
	if ((db_view = find_lcrash_debug_view(view_name)) == NULL) {
		fprintf(cmd->efp, "View '%s' not registered!\n", view_name);
		return -1;
	}
	debug_write_output(db_info, db_view, cmd->ofp);
	return 0;
}

/*
 * List the names of all buffered debug logs ('s390dbf').
 */
static int
list_areas(FILE * ofp)
{
	debug_info_t* act_debug_info = debug_area_first;

	fprintf(ofp, "Debug Logs:\n");
	fprintf(ofp, "==================\n");
	while(act_debug_info != NULL){
		fprintf(ofp, " - %s\n", act_debug_info->name);
		act_debug_info = act_debug_info->next;
	}
	return 0;
}

/*
 * List the views installed for one debug log and whether each is
 * available locally ('s390dbf <log>').
 */
static int
list_one_area(const char *area_name, command_t * cmd)
{
	debug_info_t *db_info;
	int i;

	if ((db_info = find_debug_area(area_name)) == NULL) {
		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
		return -1;
	}
	fprintf(cmd->ofp, "INSTALLED VIEWS FOR '%s':\n", area_name);
	fprintf(cmd->ofp, "================================================"
		"==============================\n");
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		if (db_info->views[i] != NULL) {
			fprintf(cmd->ofp, " - %s ", db_info->views[i]->name);
			if (find_lcrash_debug_view(db_info->views[i]->name))
				fprintf(cmd->ofp, "(available)\n");
			else
				fprintf(cmd->ofp, "(not available)\n");
		}
	}
	fprintf(cmd->ofp, "================================================="
		"=============================\n");
	return 0;
}

#ifdef DBF_DYNAMIC_VIEWS
/*
 * Load a view implementation from a shared library exporting
 * debug_view_init() ('s390dbf -l', compile-time option).
 */
static int
load_debug_view(const char *path, command_t * cmd)
{
	void *library;
	const char *error;
	debug_view_t *(*view_init_func) (void);

	library = dlopen(path, RTLD_LAZY);
	if (library == NULL) {
		fprintf(cmd->efp, "Could not open %s: %s\n", path, dlerror());
		return (1);
	}
	dlerror();
	view_init_func = dlsym(library, "debug_view_init");
	error = dlerror();
	if (error) {
		fprintf(stderr, "could not find debug_view_init(): %s\n",
			error);
		exit(1);
	}
	add_lcrash_debug_view((*view_init_func) ());
	fprintf(cmd->ofp, "view %s loaded\n", path);
	fflush(stdout);
	return 0;
}
#endif

/*
 * Write one view of one debug log to <dir>/<area>/<view>.
 */
static int
save_one_view(const char *dbf_dir_name, const char *area_name,
	const char *view_name, command_t *cmd)
{
	char path_view[PATH_MAX];
	debug_info_t *db_info;
	debug_view_t *db_view;
	FILE *view_fh;

	db_info = find_debug_area(area_name);
	if (db_info == NULL) {
		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
		return -1;
	}
	db_info = get_debug_info(db_info->addr, 1);
	db_view = find_lcrash_debug_view(view_name);
	if (db_view == NULL) {
		fprintf(cmd->efp, "View '%s' not registered!\n",
			view_name);
		return -1;
	}
	sprintf(path_view, "%s/%s/%s", dbf_dir_name, area_name, view_name);
	view_fh = fopen(path_view, "w");
	if (view_fh == NULL) {
		fprintf(cmd->efp, "Could not create file: %s (%s)\n",
			path_view, strerror(errno));
		return -1;
	}
	debug_write_output(db_info, db_view, view_fh);
	fclose(view_fh);
	return 0;
}

/*
 * Save every locally available view of one debug log under
 * <dir>/<area>/.
 */
static int
save_one_area(const char *dbf_dir_name, const char *area_name,
	command_t *cmd)
{
	char dir_name_area[PATH_MAX];
	debug_info_t *db_info;
	int i;

	db_info = find_debug_area(area_name);
	if (db_info == NULL) {
		fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name);
		return -1;
	}
	sprintf(dir_name_area, "%s/%s", dbf_dir_name, area_name);
	if (mkdir(dir_name_area, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
	    != 0) {
		fprintf(cmd->efp, "Could not create directory: %s (%s)\n",
			dir_name_area, strerror(errno));
		return -1;
	}
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		if (db_info->views[i] == NULL)
			continue;
		if (!find_lcrash_debug_view(db_info->views[i]->name))
			continue;
		save_one_view(dbf_dir_name, area_name,
			db_info->views[i]->name, cmd);
	}
	return 0;
}

/*
 * Save the whole s390dbf to a directory tree ('s390dbf -s <dir>').
 */
static void
save_dbf(const char *dbf_dir_name, command_t *cmd)
{
	debug_info_t *act_debug_info = debug_area_first;
	FILE *ofp = cmd->ofp;

	fprintf(ofp, "Saving s390dbf to directory \"%s\"\n", dbf_dir_name);
	if (mkdir(dbf_dir_name, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
	    != 0) {
		fprintf(cmd->efp, "Could not create directory: %s (%s)\n",
			dbf_dir_name, strerror(errno));
		return;
	}
	while (act_debug_info != NULL) {
		save_one_area(dbf_dir_name, act_debug_info->name, cmd);
		act_debug_info = act_debug_info->next;
	}
}

/*
 * s390dbf_cmd() -- Run the 's390dbf' command.
*/ static int s390dbf_cmd(command_t * cmd) { syment_t *dbf_version_sym; int rc = 0; /* check version */ if(!(dbf_version_sym = kl_lkup_symname("debug_feature_version"))){ fprintf(KL_ERRORFP, "Could not determine debug_feature_version\n"); return -1; } dbf_version = KL_VREAD_UINT32(dbf_version_sym->s_addr); if ((dbf_version != DBF_VERSION_V1) && (dbf_version != DBF_VERSION_V2)){ fprintf(cmd->efp,"lcrash does not support the" " debug feature version of the dump kernel:\n"); fprintf(cmd->efp,"DUMP: %i SUPPORTED: %i and %i\n", dbf_version, DBF_VERSION_V1, DBF_VERSION_V2); return -1; } dbf_init(); if (cmd->flags & C_ALL) { return (0); } #ifdef DBF_DYNAMIC_VIEWS if (cmd->flags & LOAD_FLAG) { printf("loading: %s\n", cmd->args[0]); return (load_debug_view(cmd->args[0], cmd)); } #endif if (cmd->flags & VIEWS_FLAG) { print_lcrash_debug_views(cmd->ofp); return (0); } if (cmd->nargs > 2) { s390dbf_usage(cmd); return (1); } if(get_debug_areas() == -1) return -1; if (cmd->flags & SAVE_DBF_FLAG) { if (cmd->nargs != 2) { fprintf(cmd->efp, "Specify directory name for -s\n"); return 1; } save_dbf(cmd->args[1], cmd); return 0; } switch (cmd->nargs) { case 0: rc = list_areas(cmd->ofp); break; case 1: rc = list_one_area(cmd->args[0], cmd); break; case 2: rc = list_one_view(cmd->args[0], cmd->args[1], cmd); break; } free_debug_areas(); return rc; } #define _S390DBF_USAGE " [-v] [-s dirname] [debug log] [debug view]" /* * s390dbf_usage() -- Print the usage string for the 's390dbf' command. */ void s390dbf_usage(command_t * cmd) { CMD_USAGE(cmd, _S390DBF_USAGE); } /* * s390 debug feature command for crash */ char *help_s390dbf[] = { "s390dbf", "s390dbf prints out debug feature logs", "[-v] [-s dirname] [debug log] [debug view]" "", "Display Debug logs:", " + If called without parameters, all active debug logs are listed.", " + If called with the name of a debug log, all debug-views for which", " the debug-log has registered are listed. 
It is possible thatsome", " of the debug views are not available to 'crash'.", " + If called with the name of a debug-log and an available viewname,", " the specified view is printed.", " + If called with '-s dirname', the s390dbf is saved to the specified", " directory", " + If called with '-v', all debug views which are available to", " 'crash' are listed", NULL }; void cmd_s390dbf() { int i,c; command_t cmd = { .ofp = fp, .efp = stderr, .cmdstr = "s390dbf", .command = "s390dbf", }; cmd.nargs=argcnt - 1; for (i=1; i < argcnt; i++) cmd.args[i-1] = args[i]; while ((c = getopt(argcnt, args, "vs")) != EOF) { switch(c) { case 'v': cmd.flags |= VIEWS_FLAG; break; case 's': cmd.flags |= SAVE_DBF_FLAG; break; default: s390dbf_usage(&cmd); return; } } s390dbf_cmd(&cmd); } #endif crash-7.1.4/ipcs.c0000664000000000000000000007346212634305150012440 0ustar rootroot/* ipcs.c - provide information on ipc facilities * * Copyright (C) 2012 FUJITSU LIMITED * Auther: Qiao Nuohan * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 */

#include "defs.h" /* From the crash source top-level directory */

/* which selectors the user supplied on the 'ipcs' command line */
#define SPECIFIED_NOTHING 0x0
#define SPECIFIED_ID 0x1
#define SPECIFIED_ADDR 0x2

#define IPCS_INIT 0x1

#define MAX_ID_SHIFT (sizeof(int)*8 - 1)
#define MAX_ID_BIT (1U << MAX_ID_SHIFT)
#define MAX_ID_MASK (MAX_ID_BIT - 1)

/* shm_perm mode bits (match kernel include/linux/shm.h) */
#define SHM_DEST 01000
#define SHM_LOCKED 02000

/* summary of one shmid_kernel read from the dump */
struct shm_info {
	ulong shmid_kernel;
	int key;
	int shmid;
	ulong rss;
	ulong swap;
	unsigned int uid;
	unsigned int perms;
	ulong bytes;
	ulong nattch;
	ulong shm_inode;
	int deleted;
};

/* summary of one sem_array read from the dump */
struct sem_info {
	ulong sem_array;
	int key;
	int semid;
	unsigned int uid;
	unsigned int perms;
	ulong nsems;
	int deleted;
};

/* summary of one msg_queue read from the dump */
struct msg_info {
	ulong msg_queue;
	int key;
	int msgid;
	unsigned int uid;
	unsigned int perms;
	ulong bytes;
	ulong messages;
	int deleted;
};

/* per-session constants resolved once by ipcs_init() */
struct ipcs_table {
	int idr_bits;
	ulong init_flags;
	ulong hugetlbfs_f_op_addr;
	ulong shm_f_op_addr;
	ulong shm_f_op_huge_addr;
	int use_shm_f_op;
	int seq_multiplier;
};

/*
 * function declaration
 */
static void ipcs_init(void);
static int dump_shared_memory(int, ulong, int, ulong);
static int dump_semaphore_arrays(int, ulong, int, ulong);
static int dump_message_queues(int, ulong, int, ulong);
static int ipc_search_idr(ulong, int, ulong,
	int (*)(ulong, int, ulong, int, int), int);
static int ipc_search_array(ulong, int, ulong,
	int (*)(ulong, int, ulong, int, int), int);
static ulong idr_find(ulong, int);
static int dump_shm_info(ulong, int, ulong, int, int);
static int dump_sem_info(ulong, int, ulong, int, int);
static int dump_msg_info(ulong, int, ulong, int, int);
static void get_shm_info(struct shm_info *, ulong, int);
static void get_sem_info(struct sem_info *, ulong, int);
static void get_msg_info(struct msg_info *, ulong, int);
static void add_rss_swap(ulong, int, ulong *, ulong *);
static int is_file_hugepages(ulong);

/*
 * global data
 */
static struct ipcs_table ipcs_table = { 0 };

/*
 * One-time resolution of all structure member offsets, structure sizes,
 * and file_operations addresses needed by the 'ipcs' command; guarded
 * by IPCS_INIT so repeated invocations are cheap.
 */
static void
ipcs_init(void)
{
	if (ipcs_table.init_flags & IPCS_INIT) {
		return;
	}

	ipcs_table.init_flags |= IPCS_INIT;

	MEMBER_OFFSET_INIT(file_f_op, "file", "f_op");
	MEMBER_OFFSET_INIT(file_private_data, "file", "private_data");
	MEMBER_OFFSET_INIT(hstate_order, "hstate", "order");
	MEMBER_OFFSET_INIT(hugetlbfs_sb_info_hstate, "hugetlbfs_sb_info",
		"hstate");
	MEMBER_OFFSET_INIT(idr_layers, "idr", "layers");
	MEMBER_OFFSET_INIT(idr_layer_layer, "idr_layer", "layer");
	MEMBER_OFFSET_INIT(idr_layer_ary, "idr_layer", "ary");
	MEMBER_OFFSET_INIT(idr_top, "idr", "top");
	MEMBER_OFFSET_INIT(ipc_id_ary_p, "ipc_id_ary", "p");
	MEMBER_OFFSET_INIT(ipc_ids_entries, "ipc_ids", "entries");
	MEMBER_OFFSET_INIT(ipc_ids_max_id, "ipc_ids", "max_id");
	MEMBER_OFFSET_INIT(ipc_ids_in_use, "ipc_ids", "in_use");
	MEMBER_OFFSET_INIT(ipc_ids_ipcs_idr, "ipc_ids", "ipcs_idr");
	MEMBER_OFFSET_INIT(ipc_namespace_ids, "ipc_namespace", "ids");
	MEMBER_OFFSET_INIT(kern_ipc_perm_key, "kern_ipc_perm", "key");
	MEMBER_OFFSET_INIT(kern_ipc_perm_id, "kern_ipc_perm", "id");
	MEMBER_OFFSET_INIT(kern_ipc_perm_uid, "kern_ipc_perm", "uid");
	MEMBER_OFFSET_INIT(kern_ipc_perm_mode, "kern_ipc_perm", "mode");
	MEMBER_OFFSET_INIT(kern_ipc_perm_deleted, "kern_ipc_perm", "deleted");
	MEMBER_OFFSET_INIT(kern_ipc_perm_seq, "kern_ipc_perm", "seq");
	MEMBER_OFFSET_INIT(nsproxy_ipc_ns, "nsproxy", "ipc_ns");
	MEMBER_OFFSET_INIT(shmem_inode_info_vfs_inode, "shmem_inode_info",
		"vfs_inode");
	MEMBER_OFFSET_INIT(shmem_inode_info_swapped, "shmem_inode_info",
		"swapped");
	/* "swapped" may live in an anonymous union on newer kernels */
	if (INVALID_MEMBER(shmem_inode_info_swapped))
		ANON_MEMBER_OFFSET_INIT(shmem_inode_info_swapped,
			"shmem_inode_info", "swapped");
	MEMBER_OFFSET_INIT(shm_file_data_file, "shm_file_data", "file");
	MEMBER_OFFSET_INIT(shmid_kernel_shm_perm, "shmid_kernel",
		"shm_perm");
	MEMBER_OFFSET_INIT(shmid_kernel_shm_segsz, "shmid_kernel",
		"shm_segsz");
	MEMBER_OFFSET_INIT(shmid_kernel_shm_nattch, "shmid_kernel",
		"shm_nattch");
	MEMBER_OFFSET_INIT(shmid_kernel_shm_file, "shmid_kernel", "shm_file");
	MEMBER_OFFSET_INIT(shmid_kernel_id, "shmid_kernel", "id");
	MEMBER_OFFSET_INIT(sem_array_sem_perm, "sem_array", "sem_perm");
	MEMBER_OFFSET_INIT(sem_array_sem_id, "sem_array", "sem_id");
	MEMBER_OFFSET_INIT(sem_array_sem_nsems, "sem_array", "sem_nsems");
	MEMBER_OFFSET_INIT(msg_queue_q_perm, "msg_queue", "q_perm");
	MEMBER_OFFSET_INIT(msg_queue_q_id, "msg_queue", "q_id");
	MEMBER_OFFSET_INIT(msg_queue_q_cbytes, "msg_queue", "q_cbytes");
	MEMBER_OFFSET_INIT(msg_queue_q_qnum, "msg_queue", "q_qnum");
	MEMBER_OFFSET_INIT(super_block_s_fs_info, "super_block", "s_fs_info");

	/*
	 * struct size
	 */
	STRUCT_SIZE_INIT(ipc_ids, "ipc_ids");
	STRUCT_SIZE_INIT(shmid_kernel, "shmid_kernel");
	STRUCT_SIZE_INIT(sem_array, "sem_array");
	STRUCT_SIZE_INIT(msg_queue, "msg_queue");
	STRUCT_SIZE_INIT(hstate, "hstate");

	if (symbol_exists("hugetlbfs_file_operations"))
		ipcs_table.hugetlbfs_f_op_addr =
			symbol_value("hugetlbfs_file_operations");

	/* newer kernels identify hugepage shm segments by f_op address */
	if (symbol_exists("is_file_shm_hugepages")) {
		ipcs_table.use_shm_f_op = TRUE;
		ipcs_table.shm_f_op_addr =
			symbol_value("shm_file_operations");

		if (symbol_exists("shm_file_operations_huge")) {
			ipcs_table.shm_f_op_huge_addr =
				symbol_value("shm_file_operations_huge");
		} else {
			ipcs_table.shm_f_op_huge_addr = -1;
		}
	} else {
		ipcs_table.use_shm_f_op = FALSE;
		ipcs_table.shm_f_op_addr = -1;
		ipcs_table.shm_f_op_huge_addr = -1;
	}

	if (BITS32())
		ipcs_table.idr_bits = 5;
	else if (BITS64())
		ipcs_table.idr_bits = 6;
	else
		error(FATAL, "machdep->bits is not 32 or 64");

	ipcs_table.seq_multiplier = 32768;
}

/*
 * Arguments are passed to the command functions in the global args[argcnt]
 * array. See getopt(3) for info on dash arguments. Check out defs.h and
 * other crash commands for usage of the myriad of utility routines available
 * to accomplish what your task.
*/ void cmd_ipcs(void) { int specified; char *specified_value[MAXARGS]; int value_index; int c; int shm, sem, msg, verbose; int i; ulong value, task; int found; struct task_context *tc; char buf[BUFSIZE]; value_index = 0; specified = SPECIFIED_NOTHING; shm = 0; sem = 0; msg = 0; verbose = 0; tc = NULL; while ((c = getopt(argcnt, args, "smMqn:")) != EOF) { switch(c) { case 's': sem = 1; break; case 'm': shm = 1; break; case 'M': shm = 1; verbose = 1; break; case 'q': msg = 1; break; case 'n': switch (str_to_context(optarg, &value, &tc)) { case STR_PID: case STR_TASK: break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); break; } break; default: cmd_usage(pc->curcmd, SYNOPSIS);; return; } } while (args[optind]) { if (value_index >= MAXARGS) error(FATAL, "too many id/member specified\n"); specified |= SPECIFIED_ID | SPECIFIED_ADDR; specified_value[value_index] = args[optind]; stol(args[optind], FAULT_ON_ERROR, NULL); optind++; value_index++; } if (THIS_KERNEL_VERSION < LINUX(2,6,0)) command_not_supported(); ipcs_init(); if (!shm && !sem && !msg) shm = sem = msg = 1; task = tc ? 
tc->task : pid_to_task(0); if (!value_index) { if (shm) dump_shared_memory(specified, 0, verbose, task); if (sem) dump_semaphore_arrays(specified, 0, 0, task); if (msg) dump_message_queues(specified, 0, 0, task); } else { open_tmpfile(); i = 0; while (i < value_index) { found = 0; value = stol(specified_value[i], FAULT_ON_ERROR, NULL); if (shm) found += dump_shared_memory(specified, value, verbose, task); if (sem) found += dump_semaphore_arrays(specified, value, 0, task); if (msg) found += dump_message_queues(specified, value, 0, task); if (!found) fprintf(pc->saved_fp, "invalid id or address: %s\n\n", specified_value[i]); i++; } fflush(fp); rewind(fp); while (fgets(buf, BUFSIZE, fp)) fprintf(pc->saved_fp, "%s", buf); close_tmpfile(); } } static int dump_shared_memory(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_shm)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; char buf7[BUFSIZE]; if (!verbose && specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN, LJUST, "SHMID_KERNEL"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SHMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "BYTES"), mkstring(buf6, 6, LJUST, "NATTCH"), mkstring(buf7, 6, LJUST, "STATUS")); } dump_shm = dump_shm_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("shm_ids")) { ipc_ids_p = symbol_value("shm_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), 
"nsproxy.ipc_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids) + sizeof(ulong) * 2, KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + OFFSET(ipc_namespace_ids) + 2 * SIZE(ipc_ids); } if (ipc_search(ipc_ids_p, specified, specified_value, dump_shm, verbose)) { return 1; } else { if (verbose && specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN, LJUST, "SHMID_KERNEL"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SHMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "BYTES"), mkstring(buf6, 6, LJUST, "NATTCH"), mkstring(buf7, 6, LJUST, "STATUS")); fprintf(fp, "(none allocated)\n\n"); } return 0; } } static int dump_semaphore_arrays(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_sem)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; if (specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "SEM_ARRAY"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SEMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "NSEMS")); } dump_sem = dump_sem_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("sem_ids")) { ipc_ids_p = symbol_value("sem_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + 
OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), "nsproxy.ipc_ns", FAULT_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids), KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + OFFSET(ipc_namespace_ids); } return ipc_search(ipc_ids_p, specified, specified_value, dump_sem, verbose); } static int dump_message_queues(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_msg)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; if (specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "MSG_QUEUE"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "MSQID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 12, LJUST, "USED-BYTES"), mkstring(buf6, 12, LJUST, "MESSAGES")); } dump_msg = dump_msg_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("msg_ids")) { ipc_ids_p = symbol_value("msg_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), "nsproxy.ipc_ns", FAULT_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids) + sizeof(ulong), KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + 
OFFSET(ipc_namespace_ids) + SIZE(ipc_ids); } return ipc_search(ipc_ids_p, specified, specified_value, dump_msg, verbose); } /* * if shared memory information is stored in an array, use this function. */ static int ipc_search_array(ulong ipc_ids_p, int specified, ulong specified_value, int (*fn)(ulong, int, ulong, int, int), int verbose) { ulong entries_p; int max_id, i; ulong *array; int found = 0; int allocated = 0; readmem(ipc_ids_p + OFFSET(ipc_ids_entries), KVADDR, &entries_p, sizeof(ulong), "ipc_ids.entries", FAULT_ON_ERROR); readmem(ipc_ids_p + OFFSET(ipc_ids_max_id), KVADDR, &max_id, sizeof(int), "ipc_ids.max_id", FAULT_ON_ERROR); if (max_id < 0) { if (specified == SPECIFIED_NOTHING && !verbose) fprintf(fp, "(none allocated)\n\n"); return 0; } array = (ulong *)GETBUF(sizeof(ulong *) * (max_id + 1)); if (VALID_MEMBER(ipc_id_ary_p)) readmem(entries_p + OFFSET(ipc_id_ary_p), KVADDR, array, sizeof(ulong *) * (max_id + 1), "ipc_id_ary.p", FAULT_ON_ERROR); else readmem(entries_p, KVADDR, array, sizeof(ulong *)*(max_id+1), "ipc_id array", FAULT_ON_ERROR); for (i=0; i<=max_id; i++) { if (array[i] == 0) continue; if (fn(array[i], specified, specified_value, i, verbose)) { allocated++; found = 1; if (specified != SPECIFIED_NOTHING) break; } } if (specified == SPECIFIED_NOTHING && !verbose) { if (!allocated) fprintf(fp, "(none allocated)\n"); fprintf(fp, "\n"); } FREEBUF(array); if (found) return 1; else return 0; } /* * if shared memory information is stored by using idr, use this function to * get data. 
 */
static int
ipc_search_idr(ulong ipc_ids_p, int specified, ulong specified_value,
	int (*fn)(ulong, int, ulong, int, int), int verbose)
{
	int in_use;
	ulong ipcs_idr_p;
	ulong ipc;
	int next_id, total;
	int found = 0;

	readmem(ipc_ids_p + OFFSET(ipc_ids_in_use), KVADDR, &in_use,
		sizeof(int), "ipc_ids.in_use", FAULT_ON_ERROR);

	ipcs_idr_p = ipc_ids_p + OFFSET(ipc_ids_ipcs_idr);

	if (!in_use) {
		if (specified == SPECIFIED_NOTHING && !verbose)
			fprintf(fp, "(none allocated)\n\n");
		return 0;
	}

	/*
	 * Walk ids upward until "in_use" live entries have been visited.
	 * NOTE(review): if the idr is inconsistent (fewer live entries than
	 * in_use claims), this loop has no upper bound on next_id.
	 */
	for (total = 0, next_id = 0; total < in_use; next_id++) {
		ipc = idr_find(ipcs_idr_p, next_id);
		if (ipc == 0)
			continue;

		total++;
		if (fn(ipc, specified, specified_value, next_id, verbose)) {
			found = 1;
			if (specified != SPECIFIED_NOTHING)
				break;
		}
	}

	if (!verbose && specified == SPECIFIED_NOTHING)
		fprintf(fp, "\n");

	if (found || specified == SPECIFIED_NOTHING)
		return 1;
	else
		return 0;
}

/*
 * search every idr_layer: walk the kernel's idr radix tree top-down,
 * consuming idr_bits of the id per level, and return the leaf pointer
 * stored for "id" (0 if absent).
 */
static ulong
idr_find(ulong idp, int id)
{
	ulong idr_layer_p;
	int layer;
	int idr_layers;
	int n;
	int index;

	readmem(idp + OFFSET(idr_top), KVADDR, &idr_layer_p,
		sizeof(ulong), "idr.top", FAULT_ON_ERROR);
	if (!idr_layer_p)
		return 0;

	/* total bits covered by the tree: from top layer's level, or layer count */
	if (VALID_MEMBER(idr_layer_layer)) {
		readmem(idr_layer_p + OFFSET(idr_layer_layer), KVADDR, &layer,
			sizeof(int), "idr_layer.layer", FAULT_ON_ERROR);
		n = (layer + 1) * ipcs_table.idr_bits;
	} else {
		readmem(idp + OFFSET(idr_layers), KVADDR, &idr_layers,
			sizeof(int), "idr.layers", FAULT_ON_ERROR);
		n = idr_layers * ipcs_table.idr_bits;
	}

	id &= MAX_ID_MASK;
	if (id >= (1 << n))
		return 0;

	while (n > 0 && idr_layer_p) {
		n -= ipcs_table.idr_bits;
		index = (id >> n) & ((1 << ipcs_table.idr_bits) - 1);
		readmem(idr_layer_p + OFFSET(idr_layer_ary) +
			sizeof(ulong) * index, KVADDR, &idr_layer_p,
			sizeof(ulong), "idr_layer.ary", FAULT_ON_ERROR);
	}

	return idr_layer_p;
}

/*
 * Display one shared memory segment.  Returns 1 when the entry was
 * displayed (always for unconditional dumps; otherwise only when the
 * specified id/address matched), 0 when skipped or deleted.
 */
static int
dump_shm_info(ulong shp, int specified, ulong specified_value, int id, int verbose)
{
	struct shm_info shm_info;
	char buf[BUFSIZE];
	char buf0[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	char buf6[BUFSIZE];
	char buf7[BUFSIZE];

	get_shm_info(&shm_info, shp, id);
	if (shm_info.deleted)
		return 0;

	if (((specified & SPECIFIED_ID) && shm_info.shmid == specified_value) ||
	    ((specified & SPECIFIED_ADDR) &&
		shm_info.shmid_kernel == specified_value) ||
	    specified == SPECIFIED_NOTHING) {
		/* per-entry header for verbose or targeted output */
		if (verbose || specified != SPECIFIED_NOTHING) {
			fprintf(fp, "%s %s %s %s %s %s %s %s\n",
				mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN,
					LJUST, "SHMID_KERNEL"),
				mkstring(buf1, 8, LJUST, "KEY"),
				mkstring(buf2, 10, LJUST, "SHMID"),
				mkstring(buf3, 5, LJUST, "UID"),
				mkstring(buf4, 5, LJUST, "PERMS"),
				mkstring(buf5, 10, LJUST, "BYTES"),
				mkstring(buf6, 6, LJUST, "NATTCH"),
				mkstring(buf7, 6, LJUST, "STATUS"));
		}
		fprintf(fp, "%s %08x %-10d %-5d %-5o %-10ld %-6ld %-s %-s\n",
			mkstring(buf, VADDR_PRLEN <= 12 ? 12 : VADDR_PRLEN,
				LJUST|LONG_HEX, (char *)shm_info.shmid_kernel),
			shm_info.key,
			shm_info.shmid,
			shm_info.uid,
			shm_info.perms & 0777,
			shm_info.bytes,
			shm_info.nattch,
			shm_info.perms & SHM_DEST ? "dest" : "",
			shm_info.perms & SHM_LOCKED ? "locked" : "");

		if (verbose) {
			fprintf(fp,
				"PAGES ALLOCATED/RESIDENT/SWAPPED: %ld/%ld/%ld\n",
				(shm_info.bytes+PAGESIZE()-1) >> PAGESHIFT(),
				shm_info.rss, shm_info.swap);
			fprintf(fp, "INODE: %lx\n", shm_info.shm_inode);
		}

		if (verbose || specified != SPECIFIED_NOTHING)
			fprintf(fp, "\n");

		return 1;
	} else
		return 0;
}

/*
 * Display one semaphore array.  Returns 1 when the entry was displayed,
 * 0 when skipped or deleted.
 */
static int
dump_sem_info(ulong shp, int specified, ulong specified_value, int id, int verbose)
{
	struct sem_info sem_info;
	char buf[BUFSIZE];
	char buf0[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];

	get_sem_info(&sem_info, shp, id);
	if (sem_info.deleted)
		return 0;

	if (((specified & SPECIFIED_ID) && sem_info.semid == specified_value) ||
	    ((specified & SPECIFIED_ADDR) &&
		sem_info.sem_array == specified_value) ||
	    specified == SPECIFIED_NOTHING) {
		if (specified != SPECIFIED_NOTHING) {
			fprintf(fp, "%s %s %s %s %s %s\n",
				mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN,
					LJUST, "SEM_ARRAY"),
				mkstring(buf1, 8, LJUST, "KEY"),
				mkstring(buf2, 10, LJUST, "SEMID"),
				mkstring(buf3, 5, LJUST, "UID"),
				mkstring(buf4, 5, LJUST, "PERMS"),
				mkstring(buf5, 10, LJUST, "NSEMS"));
		}
		fprintf(fp, "%s %08x %-10d %-5d %-5o %-10ld\n",
			mkstring(buf, VADDR_PRLEN <= 10 ? 10 : VADDR_PRLEN,
				LJUST|LONG_HEX, (char *)sem_info.sem_array),
			sem_info.key,
			sem_info.semid,
			sem_info.uid,
			sem_info.perms & 0777,
			sem_info.nsems);

		if (specified != SPECIFIED_NOTHING)
			fprintf(fp, "\n");

		return 1;
	} else
		return 0;
}

/*
 * Display one message queue.  Returns 1 when the entry was displayed,
 * 0 when skipped or deleted.
 */
static int
dump_msg_info(ulong shp, int specified, ulong specified_value, int id, int verbose)
{
	struct msg_info msg_info;
	char buf[BUFSIZE];
	char buf0[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	char buf6[BUFSIZE];

	get_msg_info(&msg_info, shp, id);
	if (msg_info.deleted)
		return 0;

	if (((specified & SPECIFIED_ID) && msg_info.msgid == specified_value) ||
	    ((specified & SPECIFIED_ADDR) &&
		msg_info.msg_queue == specified_value) ||
	    specified == SPECIFIED_NOTHING) {
		if (specified != SPECIFIED_NOTHING) {
			fprintf(fp, "%s %s %s %s %s %s %s\n",
				mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN,
					LJUST, "MSG_QUEUE"),
				mkstring(buf1, 8, LJUST, "KEY"),
				mkstring(buf2, 10, LJUST, "MSQID"),
				mkstring(buf3, 5, LJUST, "UID"),
				mkstring(buf4, 5, LJUST, "PERMS"),
				mkstring(buf5, 12, LJUST, "USED-BYTES"),
				mkstring(buf6, 12, LJUST, "MESSAGES"));
		}
		fprintf(fp, "%s %08x %-10d %-5d %-5o %-12ld %-12ld\n",
			mkstring(buf, VADDR_PRLEN <= 10 ? 10 : VADDR_PRLEN,
				LJUST|LONG_HEX, (char *)msg_info.msg_queue),
			msg_info.key,
			msg_info.msgid,
			msg_info.uid,
			msg_info.perms & 0777,
			msg_info.bytes,
			msg_info.messages);

		if (specified != SPECIFIED_NOTHING)
			fprintf(fp, "\n");

		return 1;
	} else
		return 0;
}

/*
 * Fill a struct shm_info from the shmid_kernel whose shm_perm is at "shp".
 */
static void
get_shm_info(struct shm_info *shm_info, ulong shp, int id)
{
	char buf[BUFSIZE];
	ulong filep, dentryp, inodep;

	shm_info->shmid_kernel = shp - OFFSET(shmid_kernel_shm_perm);

	/*
	 * cache shmid_kernel
	 */
	readmem(shm_info->shmid_kernel, KVADDR, buf, SIZE(shmid_kernel),
		"shmid_kernel", FAULT_ON_ERROR);

	shm_info->key = INT(buf + OFFSET(shmid_kernel_shm_perm) +
		OFFSET(kern_ipc_perm_key));

	/* older kernels keep the id in shmid_kernel, newer in kern_ipc_perm */
	if (VALID_MEMBER(shmid_kernel_id))
		shm_info->shmid = INT(buf + OFFSET(shmid_kernel_id));
	else
		shm_info->shmid = INT(buf + OFFSET(shmid_kernel_shm_perm) +
			OFFSET(kern_ipc_perm_id));

	shm_info->uid = UINT(buf + OFFSET(shmid_kernel_shm_perm) +
		OFFSET(kern_ipc_perm_uid));

	/* kern_ipc_perm.mode width differs between 32- and 64-bit kernels */
	if (BITS32())
		shm_info->perms = USHORT(buf + OFFSET(shmid_kernel_shm_perm) +
			OFFSET(kern_ipc_perm_mode));
	else
		shm_info->perms = UINT(buf + OFFSET(shmid_kernel_shm_perm) +
			OFFSET(kern_ipc_perm_mode));

	shm_info->bytes = ULONG(buf + OFFSET(shmid_kernel_shm_segsz));
	shm_info->nattch = ULONG(buf + OFFSET(shmid_kernel_shm_nattch));

	filep = ULONG(buf + OFFSET(shmid_kernel_shm_file));
	readmem(filep + OFFSET(file_f_dentry), KVADDR, &dentryp,
		sizeof(ulong), "file.f_dentry", FAULT_ON_ERROR);
	readmem(dentryp + OFFSET(dentry_d_inode), KVADDR, &inodep,
		sizeof(ulong), "dentry.d_inode", FAULT_ON_ERROR);

	/*
	 * shm_inode here is the vfs_inode of struct shmem_inode_info
	 */
	shm_info->shm_inode = inodep;

	shm_info->rss = 0;
	shm_info->swap = 0;
	add_rss_swap(inodep, is_file_hugepages(filep), &shm_info->rss,
		&shm_info->swap);

	shm_info->deleted = UINT(buf + OFFSET(shmid_kernel_shm_perm) +
		OFFSET(kern_ipc_perm_deleted));
}

/*
 * Fill a struct sem_info from the sem_array whose sem_perm is at "shp".
 * When neither an explicit id member exists, reconstruct the old-style
 * semid from the sequence number: seq * SEQ_MULTIPLIER + slot index.
 */
static void
get_sem_info(struct sem_info *sem_info, ulong shp, int id)
{
	char buf[BUFSIZE];

	sem_info->sem_array = shp - OFFSET(sem_array_sem_perm);

	/*
	 * cache sem_array
	 */
	readmem(sem_info->sem_array, KVADDR, buf, SIZE(sem_array),
		"sem_array", FAULT_ON_ERROR);

	sem_info->key = INT(buf + OFFSET(sem_array_sem_perm) +
		OFFSET(kern_ipc_perm_key));

	if (VALID_MEMBER(sem_array_sem_id))
		sem_info->semid = INT(buf + OFFSET(sem_array_sem_id));
	else if (VALID_MEMBER(kern_ipc_perm_id))
		sem_info->semid = INT(buf + OFFSET(sem_array_sem_perm) +
			OFFSET(kern_ipc_perm_id));
	else {
		ulong seq;
		seq = ULONG(buf + OFFSET(sem_array_sem_perm) +
			OFFSET(kern_ipc_perm_seq));
		sem_info->semid = ipcs_table.seq_multiplier * seq + id;
	}

	sem_info->uid = UINT(buf + OFFSET(sem_array_sem_perm) +
		OFFSET(kern_ipc_perm_uid));

	if (BITS32())
		sem_info->perms = USHORT(buf + OFFSET(sem_array_sem_perm) +
			OFFSET(kern_ipc_perm_mode));
	else
		sem_info->perms = UINT(buf + OFFSET(sem_array_sem_perm) +
			OFFSET(kern_ipc_perm_mode));

	sem_info->nsems = ULONG(buf + OFFSET(sem_array_sem_nsems));

	sem_info->deleted = UINT(buf + OFFSET(sem_array_sem_perm) +
		OFFSET(kern_ipc_perm_deleted));
}

/*
 * Fill a struct msg_info from the msg_queue whose q_perm is at "shp".
 * Id reconstruction mirrors get_sem_info().
 */
static void
get_msg_info(struct msg_info *msg_info, ulong shp, int id)
{
	char buf[BUFSIZE];

	msg_info->msg_queue = shp - OFFSET(msg_queue_q_perm);

	/*
	 * cache msg_queue
	 */
	readmem(msg_info->msg_queue, KVADDR, buf, SIZE(msg_queue),
		"msg_queue", FAULT_ON_ERROR);

	msg_info->key = INT(buf + OFFSET(msg_queue_q_perm) +
		OFFSET(kern_ipc_perm_key));

	if (VALID_MEMBER(msg_queue_q_id))
		msg_info->msgid = INT(buf + OFFSET(msg_queue_q_id));
	else if (VALID_MEMBER(kern_ipc_perm_id))
		msg_info->msgid = INT(buf + OFFSET(msg_queue_q_perm) +
			OFFSET(kern_ipc_perm_id));
	else {
		ulong seq;
		seq = ULONG(buf + OFFSET(msg_queue_q_perm) +
			OFFSET(kern_ipc_perm_seq));
		msg_info->msgid = ipcs_table.seq_multiplier * seq + id;
	}

	msg_info->uid = UINT(buf + OFFSET(msg_queue_q_perm) +
		OFFSET(kern_ipc_perm_uid));

	if (BITS32())
		msg_info->perms = USHORT(buf + OFFSET(msg_queue_q_perm) +
			OFFSET(kern_ipc_perm_mode));
	else
		msg_info->perms = UINT(buf + OFFSET(msg_queue_q_perm) +
			OFFSET(kern_ipc_perm_mode));

	msg_info->bytes = ULONG(buf + OFFSET(msg_queue_q_cbytes));
	msg_info->messages = ULONG(buf + OFFSET(msg_queue_q_qnum));

	msg_info->deleted = UINT(buf + OFFSET(msg_queue_q_perm) +
		OFFSET(kern_ipc_perm_deleted));
}

/*
 * get rss & swap related to every shared memory, and get the total number of rss
 * & swap
 */
static void
add_rss_swap(ulong inode_p, int hugepage, ulong *rss, ulong *swap)
{
	unsigned long mapping_p, nr_pages;

	readmem(inode_p + OFFSET(inode_i_mapping), KVADDR, &mapping_p,
		sizeof(ulong), "inode.i_mapping", FAULT_ON_ERROR);
	readmem(mapping_p + OFFSET(address_space_nrpages), KVADDR, &nr_pages,
		sizeof(ulong), "address_space.nrpages", FAULT_ON_ERROR);

	if (hugepage) {
		unsigned long pages_per_hugepage;

		if (VALID_SIZE(hstate)) {
			/* hugepage size comes from the hstate of the backing fs */
			unsigned long i_sb_p, hsb_p, hstate_p;
			unsigned int order;

			readmem(inode_p + OFFSET(inode_i_sb), KVADDR, &i_sb_p,
				sizeof(ulong), "inode.i_sb", FAULT_ON_ERROR);
			readmem(i_sb_p + OFFSET(super_block_s_fs_info), KVADDR,
				&hsb_p, sizeof(ulong),
				"super_block.s_fs_info", FAULT_ON_ERROR);
			readmem(hsb_p + OFFSET(hugetlbfs_sb_info_hstate),
				KVADDR, &hstate_p, sizeof(ulong),
				"hugetlbfs_sb_info.hstate", FAULT_ON_ERROR);
			readmem(hstate_p + OFFSET(hstate_order), KVADDR,
				&order, sizeof(uint), "hstate.order",
				FAULT_ON_ERROR);
			pages_per_hugepage = 1 << order;
		} else {
			unsigned long hpage_shift;

			/*
			 * HPAGE_SHIFT is 21 after commit 83a5101b
			 * (kernel > 2.6.24)
			 */
			if (THIS_KERNEL_VERSION > LINUX(2, 6, 24)) {
				hpage_shift = 21;
			} else {
				/*
				 * HPAGE_SHIFT:
				 *   x86(PAE): 21
				 *   x86(no PAE): 22
				 *   x86_64: 21
				 */
				if ((machine_type("X86") &&
				    !(machdep->flags & PAE)))
					hpage_shift = 22;
				else
					hpage_shift = 21;
			}
			pages_per_hugepage = (1 << hpage_shift) / PAGESIZE();
		}
		*rss += pages_per_hugepage * nr_pages;
	} else {
		unsigned long swapped;

		*rss += nr_pages;

		/* swapped count lives in the shmem_inode_info containing the inode */
		readmem(inode_p - OFFSET(shmem_inode_info_vfs_inode) +
			OFFSET(shmem_inode_info_swapped), KVADDR, &swapped,
			sizeof(ulong), "shmem_inode_info.swapped",
			FAULT_ON_ERROR);
		*swap += swapped;
	}
}

/*
 * Return 1 when the file is backed by hugepages, 0 otherwise.  Decided by
 * comparing file.f_op against the known hugetlbfs/shm file_operations;
 * for older shm layouts, follow shm_file_data.file and re-check.
 */
static int
is_file_hugepages(ulong file_p)
{
	unsigned long f_op, sfd_p;

again:
	readmem(file_p + OFFSET(file_f_op), KVADDR, &f_op,
		sizeof(ulong), "file.f_op", FAULT_ON_ERROR);
	if (f_op == ipcs_table.hugetlbfs_f_op_addr)
		return 1;

	if (ipcs_table.use_shm_f_op) {
		if (ipcs_table.shm_f_op_huge_addr != -1) {
			if (f_op == ipcs_table.shm_f_op_huge_addr)
				return 1;
		} else {
			if (f_op == ipcs_table.shm_f_op_addr) {
				readmem(file_p + OFFSET(file_private_data),
					KVADDR, &sfd_p, sizeof(ulong),
					"file.private_data", FAULT_ON_ERROR);
				readmem(sfd_p + OFFSET(shm_file_data_file),
					KVADDR, &file_p, sizeof(ulong),
					"shm_file_data.file", FAULT_ON_ERROR);
				goto again;
			}
		}
	}

	return 0;
}
crash-7.1.4/ibm_common.h0000664000000000000000000000726112634305150013620 0ustar  rootroot/* ibm_common.h - core analysis suite
 *
 * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * header file for zgetdump
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Despina Papadopoulou
 */

/* This header file holds the architecture specific crash dump header */
#ifndef _ZGETDUMP_H
#define _ZGETDUMP_H

/* definitions (this has to match with vmdump.h of lcrash */

#define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */
#define S390_DUMP_HEADER_SIZE 4096
#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */

/*
 * Structure: s390_dump_header_t
 * Function: This is the header dumped at the top of every valid s390 crash
 *           dump.  Fixed on-disk layout; the trailing pad keeps the header
 *           at exactly S390_DUMP_HEADER_SIZE (4096) bytes.
 */
typedef struct _s390_dump_header_s {
	/* the dump magic number -- unique to verify dump is valid */
	uint64_t dh_magic_number;	/* 0x000 */
	/* the version number of this dump */
	uint32_t dh_version;		/* 0x008 */
	/* the size of this header (in case we can't read it) */
	uint32_t dh_header_size;	/* 0x00c */
	/* the level of this dump (just a header?) */
	uint32_t dh_dump_level;		/* 0x010 */
	/* the size of a Linux memory page (4K, 8K, 16K, etc.) */
	uint32_t dh_page_size;		/* 0x014 */
	/* the size of all physical memory */
	uint64_t dh_memory_size;	/* 0x018 */
	/* the start of physical memory */
	uint64_t dh_memory_start;	/* 0x020 */
	/* the end of physical memory */
	uint64_t dh_memory_end;		/* 0x028 */
	/* the number of pages in this dump specifically */
	uint32_t dh_num_pages;		/* 0x030 */
	/* ensure that dh_tod and dh_cpu_id are 8 byte aligned */
	uint32_t dh_pad;		/* 0x034 */
	/* the time of the dump generation using stck */
	uint64_t dh_tod;		/* 0x038 */
	/* cpu id */
	uint64_t dh_cpu_id;		/* 0x040 */
	/* arch */
	uint32_t dh_arch;		/* 0x048 */
	/* volume number */
	uint32_t dh_volnr;		/* 0x04c */
	/* build arch */
	uint32_t dh_build_arch;		/* 0x050 */
	/* fill up to 4096 byte */
	unsigned char end_pad[0x1000-0x054];	/* 0x054 */
} __attribute__((packed)) s390_dump_header_t;

/*
 * Structure: s390_dump_end_marker_t
 * Function: This end marker should be at the end of every valid s390 crash
 *           dump.
 */
typedef struct _s390_dump_end_marker_{
	char end_string[8];
	unsigned long long end_time;
} __attribute__((packed)) s390_dump_end_marker_t;

#endif /* _ASM_VMDUMP_H */
crash-7.1.4/lkcd_dump_v5.h0000775000000000000000000002206512634305150014057 0ustar  rootroot/* lkcd_dump_v5.h - core analysis suite
 *
 * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Kernel header file for Linux crash dumps.
 *
 * Created by: Matt Robinson (yakker@sgi.com)
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 *
 * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net)
 * Copyright 2001 Matt D. Robinson. All rights reserved.
 *
 * Most of this is the same old stuff from vmdump.h, except now we're
 * actually a stand-alone driver plugged into the block layer interface,
 * with the exception that we now allow for compression modes externally
 * loaded (e.g., someone can come up with their own).
 */

/* This header file includes all structure definitions for crash dumps.
 */
#ifndef _DUMP_H
#define _DUMP_H

/* NOTE(review): angle-bracket include targets were stripped from this
 * extraction; the bare #include lines below are preserved as found. */
//#include 

/* define TRUE and FALSE for use in our dump modules */
#ifndef FALSE
#define FALSE 0
#endif

#ifndef TRUE
#define TRUE 1
#endif

#ifndef MCLX
/*
 * MCLX NOTE: the architecture-specific headers are being ignored until
 * deemed necessary; crash has never used them functionally, and only
 * referencing them in the dump_sgi_environment() helper routines.
 */
/* necessary header files */
#include  /* for architecture-specific header */
#endif

#define UTSNAME_ENTRY_SZ 65

/* necessary header definitions in all cases */
#define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */

/* size of a dump header page */
#define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */

/* header definitions for s390 dump */
#define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */
#define S390_DUMP_HEADER_SIZE 4096

/* standard header definitions */
#define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */
#define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */
#define DUMP_VERSION_NUMBER 0x5 /* dump version number */
#define DUMP_PANIC_LEN 0x100 /* dump panic string length */

/* dump levels - type specific stuff added later -- add as necessary */
#define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */
#define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */
#define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */
#define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */
#define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */

/* dump compression options -- add as necessary */
#define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */
#define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */
#define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */

/* dump flags - any dump-type specific flags -- add as necessary */
#define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */
#define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */

/* dump header flags -- add as necessary */
#define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */
#define DUMP_DH_RAW 0x1 /* raw page (no compression) */
#define DUMP_DH_COMPRESSED 0x2 /* page is compressed */
#define DUMP_DH_END 0x4 /* end marker on a full dump */

/* names for various dump tunables (they are now all read-only) */
#define DUMP_ROOT_NAME "sys/dump"
#define DUMP_DEVICE_NAME "dump_device"
#define DUMP_COMPRESS_NAME "dump_compress"
#define DUMP_LEVEL_NAME "dump_level"
#define DUMP_FLAGS_NAME "dump_flags"

/* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */
#define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512)

/* dump ioctl() control options */
#define DIOSDUMPDEV 1 /* set the dump device */
#define DIOGDUMPDEV 2 /* get the dump device */
#define DIOSDUMPLEVEL 3 /* set the dump level */
#define DIOGDUMPLEVEL 4 /* get the dump level */
#define DIOSDUMPFLAGS 5 /* set the dump flag parameters */
#define DIOGDUMPFLAGS 6 /* get the dump flag parameters */
#define DIOSDUMPCOMPRESS 7 /* set the dump compress level */
#define DIOGDUMPCOMPRESS 8 /* get the dump compress level */

/* the major number used for the dumping device */
#ifndef DUMP_MAJOR
#define DUMP_MAJOR 227
#endif

/*
 * Structure: dump_header_t
 * Function: This is the header dumped at the top of every valid crash
 *           dump.
 *           easy reassembly of each crash dump page.  The address bits
 *           are split to make things easier for 64-bit/32-bit system
 *           conversions.
 */
typedef struct _dump_header_s {
	/* the dump magic number -- unique to verify dump is valid */
	uint64_t dh_magic_number;

	/* the version number of this dump */
	uint32_t dh_version;

	/* the size of this header (in case we can't read it) */
	uint32_t dh_header_size;

	/* the level of this dump (just a header?) */
	uint32_t dh_dump_level;

	/* the size of a Linux memory page (4K, 8K, 16K, etc.) */
	uint32_t dh_page_size;

	/* the size of all physical memory */
	uint64_t dh_memory_size;

	/* the start of physical memory */
	uint64_t dh_memory_start;

	/* the end of physical memory */
	uint64_t dh_memory_end;

	/* the number of pages in this dump specifically */
	uint32_t dh_num_pages;

	/* the panic string, if available */
	char dh_panic_string[DUMP_PANIC_LEN];

	/* the time of the system crash */
	struct timeval dh_time;

	/* the NEW utsname (uname) information -- in character form */
	/* we do this so we don't have to include utsname.h */
	/* plus it helps us be more architecture independent */
	/* now maybe one day soon they'll make the [65] a #define! */
	char dh_utsname_sysname[65];
	char dh_utsname_nodename[65];
	char dh_utsname_release[65];
	char dh_utsname_version[65];
	char dh_utsname_machine[65];
	char dh_utsname_domainname[65];

	/* the address of current task (OLD = task_struct *, NEW = void *) */
	void *dh_current_task;

	/* what type of compression we're using in this dump (if any) */
	uint32_t dh_dump_compress;

	/* any additional flags */
	uint32_t dh_dump_flags;

	/* any additional flags */
	uint32_t dh_dump_device;
} dump_header_t;

/*
 * Structure: dump_page_t
 * Function: To act as the header associated to each physical page of
 *           memory saved in the system crash dump.  This allows for
 *           easy reassembly of each crash dump page.  The address bits
 *           are split to make things easier for 64-bit/32-bit system
 *           conversions.
 */
typedef struct _dump_page_s {
	/* the address of this dump page */
	uint64_t dp_address;

	/* the size of this dump page */
	uint32_t dp_size;

	/* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */
	uint32_t dp_flags;
} dump_page_t;

/*
 * This structure contains information needed for the lkcdutils
 * package (particularly lcrash) to determine what information is
 * associated to this kernel, specifically.
 */
typedef struct lkcdinfo_s {
	int arch;
	int ptrsz;
	int byte_order;
	int linux_release;
	int page_shift;
	int page_size;
	uint64_t page_mask;
	uint64_t page_offset;
	int stack_offset;
} lkcdinfo_t;

#ifdef __KERNEL__

/*
 * Structure: dump_compress_t
 * Function: This is what an individual compression mechanism can use
 *           to plug in their own compression techniques.  It's always
 *           best to build these as individual modules so that people
 *           can put in whatever they want.
 */
typedef struct dump_compress_s {
	/* the list_head structure for list storage */
	struct list_head list;

	/* the type of compression to use (DUMP_COMPRESS_XXX) */
	int compress_type;

	/* the compression function to call */
	int (*compress_func)(char *, int, char *, int);
} dump_compress_t;

extern int dump_init(void);
extern void dump_execute(char *, struct pt_regs *);
extern int page_is_ram(unsigned long);

#endif /* __KERNEL__ */

#endif /* _DUMP_H */
crash-7.1.4/lkcd_x86_trace.c0000664000000000000000000041770412634305150014303 0ustar  rootroot/*
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 */

/*
 * lkcd_x86_trace.c
 *
 * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
* * Adapted as noted from the following LKCD files: * * lkcdutils-4.1/lcrash/arch/i386/lib/dis.c * lkcdutils-4.1/lcrash/arch/i386/lib/trace.c * lkcdutils-4.1/libutil/kl_queue.c */ #ifdef X86 #ifdef REDHAT #include "lkcd_x86_trace.h" #undef XEN_HYPER_MODE static int XEN_HYPER_MODE(void) { return (pc->flags & XEN_HYPER) != 0; } static void *kl_alloc_block(int, int); static void kl_free_block(void *); static void GET_BLOCK(kaddr_t, unsigned, void *); static void kl_get_kaddr(kaddr_t, void *); static char *kl_funcname(kaddr_t); static kaddr_t kl_funcaddr(kaddr_t); static syment_t *kl_lkup_symaddr(kaddr_t); static k_error_t kl_get_task_struct(kaddr_t, int, void *); static kaddr_t kl_kernelstack(kaddr_t); static kaddr_t get_call_pc(kaddr_t); static int get_jmp_instr(kaddr_t, kaddr_t, kaddr_t *, char *, char **); static int is_push(unsigned int); static int is_pop(unsigned int); static int get_framesize(kaddr_t, struct bt_info *); static int cache_framesize(int, kaddr_t funcaddr, int *, void **); struct framesize_cache; static int framesize_modify(struct framesize_cache *); struct framesize_mods; static int compiler_matches(struct framesize_mods *); static sframe_t *alloc_sframe(trace_t *, int); static void free_sframes(trace_t *); static void free_trace_rec(trace_t *); static void clean_trace_rec(trace_t *); static int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *); static int valid_ra(kaddr_t); static int valid_ra_function(kaddr_t, char *); static int eframe_incr(kaddr_t, char *); static int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int); static void dump_stack_frame(trace_t *, sframe_t *, FILE *); static void print_trace(trace_t *, int, FILE *); static int eframe_type(uaddr_t *); static char *funcname_display(char *, ulong, struct bt_info *, char *); static void print_eframe(FILE *, uaddr_t *); static void trace_banner(FILE *); static void print_kaddr(kaddr_t, FILE *, int); int do_text_list(kaddr_t, int, FILE *); int print_traces(struct bt_info *, 
int, int, FILE *); static int get_instr_info(kaddr_t, instr_rec_t *); static instr_rec_t *get_instr_stream(kaddr_t, int, int); static void free_instr_stream(instr_rec_t *); static trace_t *alloc_trace_rec(int); static void kl_enqueue(element_t**, element_t*); static element_t *kl_dequeue(element_t**); static void handle_trace_error(struct bt_info *, int, FILE *); static int verify_back_trace(struct bt_info *); static int recoverable(struct bt_info *, FILE *); static void fill_instr_cache(kaddr_t, char *); static void do_bt_reference_check(struct bt_info *, sframe_t *); static void print_stack_entry(struct bt_info *, int, ulong, ulong, char *, sframe_t *, FILE *); static struct syment *eframe_label(char *, ulong); static int dump_framesize_cache(FILE *, struct framesize_cache *); static int modify_framesize_cache_entry(FILE *, ulong, int); static int framesize_debug(struct bt_info *, FILE *); static int kernel_entry_from_user_space(sframe_t *, struct bt_info *); k_error_t klib_error = 0; static void * kl_alloc_block(int size, int flags) { return ((void *)GETBUF(size)); } static void kl_free_block(void *blk) { if (blk) FREEBUF(blk); } static void GET_BLOCK(kaddr_t addr, unsigned size, void *buffer) { KL_ERROR = 0; if (!readmem(addr, KVADDR, (void *)buffer, (ulong)size, "GET_BLOCK", RETURN_ON_ERROR|QUIET)) { console("GET_BLOCK: %lx (%d/0x%x)\n", addr, size, size); KL_ERROR = KLE_INVALID_READ; } } static void kl_get_kaddr(kaddr_t addr, void *bp) { KL_ERROR = 0; GET_BLOCK(addr, 4, bp); } static char * kl_funcname(kaddr_t pc) { struct syment *sp; char *buf, *name; struct load_module *lm; if ((sp = value_search(pc, NULL))) { if (STREQ(sp->name, "_stext") && (sp->value == (sp+1)->value)) sp++; switch (sp->type) { case 'r': if (strstr(sp->name, "_interrupt") || STREQ(sp->name, "call_do_IRQ")) return sp->name; break; case 't': case 'T': return sp->name; } if (is_kernel_text(pc)) return sp->name; } if (IS_MODULE_VADDR(pc)) { buf = GETBUF(BUFSIZE); name = &buf[BUFSIZE/2]; if 
(module_symbol(pc, NULL, NULL, buf, output_radix)) { sprintf(name, "(%s)", buf); return name; } else { FREEBUF(buf); return "(unknown module)"; } } if ((lm = init_module_function(pc))) return ("init_module"); return NULL; } static kaddr_t kl_funcaddr(kaddr_t pc) { struct syment *sp; struct load_module *lm; if ((sp = value_search(pc, NULL))) { switch (sp->type) { case 'r': if (strstr(sp->name, "_interrupt") || STREQ(sp->name, "call_do_IRQ")) return sp->value; break; case 't': case 'T': return sp->value; } if (is_kernel_text(pc)) return sp->value; } if ((lm = init_module_function(pc))) return lm->mod_init_module_ptr; return((kaddr_t)NULL); } static struct syment init_module_syment = { .name = "init_module", .type = 't', }; static syment_t * kl_lkup_symaddr(kaddr_t addr) { struct syment *sp; struct load_module *lm; if ((sp = value_search(addr, NULL))) return sp; if ((lm = init_module_function(addr))) { init_module_syment.value = lm->mod_init_module_ptr; return &init_module_syment; } return NULL; } static k_error_t kl_get_task_struct(kaddr_t value, int mode, void *tsp) { KL_ERROR = 0; if (value == tt->last_task_read) BCOPY(tt->task_struct, tsp, TASK_STRUCT_SZ); else GET_BLOCK(value, TASK_STRUCT_SZ, tsp); return KL_ERROR; } static kaddr_t kl_kernelstack(kaddr_t task) { kaddr_t saddr; return (saddr = (task + KSTACK_SIZE)); } static void print_kaddr(kaddr_t kaddr, FILE *ofp, int flag) { fprintf(ofp, "%lx", (ulong)kaddr); } #endif /* REDHAT */ /* * lkcdutils-4.1/lcrash/arch/i386/lib/trace.c */ #ifndef REDHAT /* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. 
*/ #include #include #include #endif /* !REDHAT */ /* * get_call_pc() */ kaddr_t get_call_pc(kaddr_t ra) { kaddr_t addr = 0; instr_rec_t *irp; if (!(irp = get_instr_stream(ra, 1, 0))) { return((kaddr_t)NULL); } if (!irp->prev) { free_instr_stream(irp); return((kaddr_t)NULL); } if ((irp->prev->opcode == 0x00e8) || (irp->prev->opcode == 0xff02)) { addr = irp->prev->addr; } free_instr_stream(irp); return(addr); } /* * get_jmp_instr() */ int get_jmp_instr(kaddr_t addr, kaddr_t isp, kaddr_t *caddr, char *fname, char **cfname) { kaddr_t a; int offset; instr_rec_t *irp; if (!(irp = get_instr_stream(addr, 1, 0))) { return(1); } if (!irp->prev) { free_instr_stream(irp); return(1); } irp = irp->prev; if (!(irp->opcode == 0x00e8) && !(irp->opcode == 0xff02)) { free_instr_stream(irp); return(1); } /* Check for the easiest case first... */ if (irp->opcode == 0xe8) { a = irp->operand[0].op_addr; if ((*cfname = kl_funcname(a))) { *caddr = a; } } else if (irp->opcode == 0xff02) { switch (irp->modrm) { case 0x14: if (irp->sib == 0x85) { kl_get_kaddr(addr - 4, &a); if (KL_ERROR) { free_instr_stream(irp); return(1); } if (strstr(fname, "system_call")) { GET_BLOCK(isp + 28, 4, &offset); a += (offset * 4); kl_get_kaddr(a, &a); if ((*cfname = kl_funcname(a))) { *caddr = a; } } } break; case 0xc2: /* EAX */ case 0xca: /* ECX */ case 0xd2: /* EDX */ case 0xda: /* EBX */ case 0xea: /* EBP */ case 0xf2: /* ESI */ case 0xfa: /* EDI */ break; } } free_instr_stream(irp); return(0); } /* * is_push() */ int is_push(unsigned int opcode) { switch(opcode) { case 0x0006: case 0x000e: case 0x0016: case 0x001e: case 0x0050: case 0x0051: case 0x0052: case 0x0053: case 0x0054: case 0x0055: case 0x0056: case 0x0057: case 0x0068: case 0x006a: case 0x009c: case 0x0fa0: case 0x0fa8: case 0xff06: return(1); case 0x0060: return(2); } return(0); } /* * is_pop() */ int is_pop(unsigned int opcode) { switch(opcode) { case 0x0007: case 0x0017: case 0x001f: case 0x0058: case 0x0059: case 0x005a: case 0x005b: case 
0x005c: case 0x005d: case 0x005e: case 0x005f: case 0x008f: case 0x009d: case 0x0fa1: case 0x0fa9: return(1); case 0x0061: return(2); } return(0); } #ifdef REDHAT #define FRAMESIZE_VALIDATE (0x1) struct framesize_cache { kaddr_t pc; int flags; int frmsize; int bp_adjust; }; #define FRAMESIZE_CACHE (200) static struct framesize_cache framesize_cache[FRAMESIZE_CACHE] = {{0}}; static struct framesize_cache framesize_cache_empty = {0}; #define FSZ_QUERY (1) #define FSZ_VALIDATE (2) #define FSZ_ENTER (3) #define FRAMESIZE_CACHE_QUERY(pc,szp) cache_framesize(FSZ_QUERY, pc, szp, NULL) #define FRAMESIZE_CACHE_ENTER(pc,szp) cache_framesize(FSZ_ENTER, pc, szp, NULL) #define FRAMESIZE_CACHE_VALIDATE(pc,fcpp) cache_framesize(FSZ_VALIDATE, pc, NULL, fcpp) static int cache_framesize(int cmd, kaddr_t funcaddr, int *fsize, void **ptr) { int i; static ulong last_cleared = 0; retry: for (i = 0; i < FRAMESIZE_CACHE; i++) { if (framesize_cache[i].pc == funcaddr) { switch (cmd) { case FSZ_VALIDATE: *ptr = &framesize_cache[i]; return TRUE; case FSZ_QUERY: *fsize = framesize_cache[i].frmsize; return TRUE; case FSZ_ENTER: *fsize = framesize_cache[i].frmsize; return TRUE; } } /* * The entry does not exist. * * If FSZ_QUERY or FSZ_VALIDATE, return their * no-such-entry indications. * * Otherwise, load up the entry with the new data, and * and modify it with known kludgery. */ if (framesize_cache[i].pc == 0) { switch (cmd) { case FSZ_QUERY: return FALSE; case FSZ_VALIDATE: *ptr = &framesize_cache_empty; return FALSE; case FSZ_ENTER: framesize_cache[i].pc = funcaddr; framesize_cache[i].frmsize = *fsize; framesize_cache[i].bp_adjust = 0; framesize_modify(&framesize_cache[i]); *fsize = framesize_cache[i].frmsize; return TRUE; } } } console("framesize_cache is full\n"); /* * No place to put it, or it doesn't exist. 
*/ switch (cmd) { case FSZ_VALIDATE: *ptr = &framesize_cache_empty; return FALSE; case FSZ_QUERY: return FALSE; case FSZ_ENTER: BZERO(&framesize_cache[last_cleared % FRAMESIZE_CACHE], sizeof(struct framesize_cache)); last_cleared++; goto retry; } return FALSE; /* can't get here -- for compiler happiness */ } /* * More kludgery for compiler oddities. */ #define COMPILER_VERSION_MASK (1) /* deprecated -- usable up to 3.3.3 */ #define COMPILER_VERSION_EQUAL (2) #define COMPILER_VERSION_START (3) #define COMPILER_VERSION_RANGE (4) struct framesize_mods { char *funcname; char *called_function; ulong compiler_flag; ulong compiler1; ulong compiler2; int pre_adjust; int post_adjust; } framesize_mods[] = { { "do_select", "schedule_timeout", COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 }, { "svc_recv", "schedule_timeout", COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 }, { "__down_interruptible", "schedule", COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 }, { "netconsole_netdump", NULL, COMPILER_VERSION_START, GCC(3,3,2), 0, 0, -28 }, { "generic_file_write", NULL, COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 20 }, { "block_prepare_write", NULL, COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 72 }, { "receive_chars", NULL, COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 48 }, { "default_idle", NULL, COMPILER_VERSION_START, GCC(2,96,0), 0, -4, 0 }, { "hidinput_hid_event", NULL, COMPILER_VERSION_START, GCC(4,1,2), 0, 0, 28 }, { NULL, NULL, 0, 0, 0, 0, 0 }, }; static int framesize_modify(struct framesize_cache *fc) { char *funcname; struct framesize_mods *fmp; if (!(funcname = kl_funcname(fc->pc))) return FALSE; if (fc->frmsize < 0) { if (CRASHDEBUG(1)) error(INFO, "bogus framesize: %d for pc: %lx (%s)\n", fc->frmsize, fc->pc, funcname); fc->frmsize = 0; } for (fmp = &framesize_mods[0]; fmp->funcname; fmp++) { if (STREQ(funcname, fmp->funcname) && compiler_matches(fmp)) break; } if (!fmp->funcname) return FALSE; if (fmp->pre_adjust) fc->frmsize += fmp->pre_adjust; if (fmp->post_adjust) 
fc->bp_adjust = fmp->post_adjust; if (fmp->called_function) { if (STREQ(fmp->called_function,x86_function_called_by(fc->pc))); fc->flags |= FRAMESIZE_VALIDATE; } return TRUE; } static int compiler_matches(struct framesize_mods *fmp) { switch (fmp->compiler_flag) { case COMPILER_VERSION_MASK: if (fmp->compiler1 & (kt->flags & GCC_VERSION_DEPRECATED)) return TRUE; break; case COMPILER_VERSION_EQUAL: if (THIS_GCC_VERSION == fmp->compiler1) return TRUE; break; case COMPILER_VERSION_START: if (THIS_GCC_VERSION >= fmp->compiler1) return TRUE; break; case COMPILER_VERSION_RANGE: if ((THIS_GCC_VERSION >= fmp->compiler1) && (THIS_GCC_VERSION <= fmp->compiler2)) return TRUE; break; } return FALSE; } static int dump_framesize_cache(FILE *ofp, struct framesize_cache *fcp) { int i, count; struct syment *sp, *spm; ulong offset; int once; for (i = once = count = 0; i < FRAMESIZE_CACHE; i++) { if (framesize_cache[i].pc == 0) break; count++; if (fcp && (fcp != &framesize_cache[i])) continue; if (!once) { fprintf(ofp, "RET ADDR FSZ BPA V FUNCTION\n"); once++; } fprintf(ofp, "%8x %4d %4d %s ", framesize_cache[i].pc, framesize_cache[i].frmsize, framesize_cache[i].bp_adjust, framesize_cache[i].flags & FRAMESIZE_VALIDATE ? 
"V" : "-"); if ((sp = value_search(framesize_cache[i].pc, &offset)) || (spm = kl_lkup_symaddr(framesize_cache[i].pc))) { if (sp) fprintf(ofp, "(%s+", sp->name); else { fprintf(ofp, "(%s+", spm->name); offset = framesize_cache[i].pc - spm->value; } switch (pc->output_radix) { case 10: fprintf(ofp, "%ld)", offset); break; default: case 16: fprintf(ofp, "%lx)", offset); break; } } fprintf(ofp, "\n"); if (fcp) return 0; } if (!count) fprintf(ofp, "framesize cache emtpy\n"); if (kt->flags & RA_SEEK) fprintf(ofp, "RA_SEEK: ON\n"); if (kt->flags & NO_RA_SEEK) fprintf(ofp, "NO_RA_SEEK: ON\n"); return count; } static int modify_framesize_cache_entry(FILE *ofp, ulong eip, int framesize) { int i, found, all_cleared; for (i = found = all_cleared = 0; i < FRAMESIZE_CACHE; i++) { if (!eip) { switch (framesize) { case -1: framesize_cache[i].flags |= FRAMESIZE_VALIDATE; break; case -2: framesize_cache[i].flags &= ~FRAMESIZE_VALIDATE; break; default: framesize_cache[i].pc = 0; framesize_cache[i].frmsize = 0; framesize_cache[i].flags = 0; all_cleared = TRUE; break; } continue; } if (framesize_cache[i].pc == 0) break; if (framesize_cache[i].pc == eip) { found++; switch (framesize) { case -1: framesize_cache[i].flags |= FRAMESIZE_VALIDATE; break; case -2: framesize_cache[i].flags &= ~FRAMESIZE_VALIDATE; break; default: framesize_cache[i].frmsize = framesize; break; } dump_framesize_cache(ofp, &framesize_cache[i]); return TRUE; } } if (eip && !found) fprintf(ofp, "eip: %lx not found in framesize cache\n", eip); if (all_cleared) fprintf(ofp, "framesize cache cleared\n"); return FALSE; } /* * If eip, look for it and replace its frmsize with the passed-in value. * If no eip, frmsize of zero means clear the cache, non-zero displays it. 
*/ static int framesize_debug(struct bt_info *bt, FILE *ofp) { ulong eip; int frmsize; eip = bt->hp->eip; frmsize = (int)bt->hp->esp; if (!eip) { switch (frmsize) { case 0: case -1: case -2: return modify_framesize_cache_entry(ofp, 0, frmsize); default: return dump_framesize_cache(ofp, NULL); } } return modify_framesize_cache_entry(ofp, eip, frmsize); } #endif /* REDHAT */ /* #define FRMSIZE_DBG 1 #define FRMSIZE2_DBG 1 */ /* * get_framesize() */ int #ifdef REDHAT get_framesize(kaddr_t pc, struct bt_info *bt) #else get_framesize(kaddr_t pc) #endif { int size, ret, frmsize = 0; kaddr_t addr; instr_rec_t irp; syment_t *sp; #ifdef REDHAT int check_IRQ_stack_switch = 0; syment_t *jmpsp, *trampsp; ulong offset; int frmsize_restore = 0; int last_add = 0; if (FRAMESIZE_CACHE_QUERY(pc, &frmsize)) return frmsize; frmsize = 0; #endif if (!(sp = kl_lkup_symaddr(pc))) { return(0); } #ifdef REDHAT if (STREQ(sp->name, "do_IRQ") && (tt->flags & IRQSTACKS)) check_IRQ_stack_switch++; if (STREQ(sp->name, "stext_lock") || STRNEQ(sp->name, ".text.lock.")) { jmpsp = x86_text_lock_jmp(pc, &offset); if (jmpsp) { console("get_framesize: stext_lock %lx => %s\n", pc, jmpsp->name); pc = jmpsp->value + offset; sp = jmpsp; } } if ((trampsp = x86_is_entry_tramp_address(pc, &offset))) { if (STREQ(sp->name, "system_call")) return 0; pc = trampsp->value + offset; } #endif #ifdef FRMSIZE_DBG fprintf(stderr, "get_framesize(): pc=0x%x (0x%x:%s)\n", pc, sp->s_addr, sp->s_name); #endif addr = sp->s_addr; while (addr <= pc) { bzero(&irp, sizeof(irp)); irp.aflag = 1; irp.dflag = 1; if (!(size = get_instr_info(addr, &irp))) { fprintf(stderr, "ZERO SIZE!!\n"); return(-1); } if (size != irp.size) { fprintf(stderr, "SIZE DOES NOT MATCH!!\n"); } #ifdef REDHAT /* * Account for do_IRQ() stack switch. */ if (check_IRQ_stack_switch && (irp.opcode == 0xff02) && (irp.operand[0].op_reg == 0x7)) break; /* * Account for embedded "ret" instructions screwing up * the frame size calculation. 
*/ if (irp.opcode == 0xc3) { frmsize += frmsize_restore; frmsize_restore = 0; last_add = FALSE; } else if ((irp.opcode == 0x8300) && (irp.operand[0].op_reg == R_eSP)) { frmsize_restore += irp.operand[1].op_addr; last_add = TRUE; } else if ((irp.opcode == 0x8100) && (irp.operand[0].op_reg == R_eSP)) { frmsize_restore += irp.operand[1].op_addr; last_add = TRUE; } else if ((ret = is_pop(irp.opcode))) { if (ret == 2) frmsize_restore += (8 * 4); else frmsize_restore += 4; last_add = FALSE; } else { if (last_add) last_add = FALSE; else frmsize_restore = 0; } #endif /* REDHAT */ #ifdef REDHAT if ((irp.opcode == 0x8300) || (irp.opcode == 0x8100)) { #else if (irp.opcode == 0x8300) { #endif /* e.g., addl $0x8,%esp */ if (irp.operand[0].op_reg == R_eSP) { frmsize -= irp.operand[1].op_addr; #ifdef FRMSIZE_DBG fprintf(stderr, " addl --> 0x%x: -%d\n", addr, irp.operand[1].op_addr); #endif } } else if ((irp.opcode == 0x8305) || (irp.opcode == 0x8105)) { /* e.g., subl $0x40,%esp */ if (irp.operand[0].op_reg == R_eSP) { frmsize += irp.operand[1].op_addr; #ifdef FRMSIZE_DBG fprintf(stderr, " subl --> 0x%x: +%d\n", addr, irp.operand[1].op_addr); #endif } } else if ((ret = is_push(irp.opcode))) { if (ret == 2) { frmsize += (8 * 4); #ifdef FRMSIZE_DBG fprintf(stderr, " pusha --> 0x%x: +%d\n", addr, (8 * 4)); #endif } else { frmsize += 4; #ifdef FRMSIZE_DBG fprintf(stderr, " pushl --> 0x%x: +%d\n" , addr, 4); #endif } } else if ((ret = is_pop(irp.opcode))) { if (ret == 2) { frmsize -= (8 * 4); #ifdef FRMSIZE_DBG fprintf(stderr, " popa --> 0x%x: -%d\n", addr, (8 * 4)); #endif } else { frmsize -= 4; #ifdef FRMSIZE_DBG fprintf(stderr, " popl --> 0x%x: -%d\n", addr, 4); #endif } #ifdef FRMSIZE2_DBG } else { fprintf(stderr, " 0x%x: opcode=0x%x\n", addr, irp.opcode); #endif } addr += size; } #ifdef REDHAT /* * Account for fact that schedule may not "call" anybody, plus * the difference between gcc 3.2 and earlier compilers. 
*/ if (STREQ(kl_funcname(pc), "schedule") && !(bt->flags & BT_CONTEXT_SWITCH)) frmsize -= THIS_GCC_VERSION == GCC(3,2,0) ? 4 : 8; FRAMESIZE_CACHE_ENTER(pc, &frmsize); #endif return(frmsize); } #ifndef REDHAT /* * print_pc() */ void print_pc(kaddr_t addr, FILE *ofp) { int offset = 0; syment_t *sp; if ((sp = kl_lkup_symaddr(addr))) { offset = addr - sp->s_addr; } /* Print out address */ fprintf(ofp, "0x%x", addr); /* Print out symbol name */ if (sp) { if (offset) { fprintf(ofp, " <%s+%d>", sp->s_name, offset); } else { fprintf(ofp, " <%s>", sp->s_name); } } } #endif /* !REDHAT */ /* * alloc_sframe() -- Allocate a stack frame record */ sframe_t * alloc_sframe(trace_t *trace, int flags) { sframe_t *f; if (flags & C_PERM) { f = (sframe_t *)kl_alloc_block(sizeof(sframe_t), K_PERM); } else { f = (sframe_t *)kl_alloc_block(sizeof(sframe_t), K_TEMP); } if (!f) { return((sframe_t *)NULL); } f->level = trace->nframes; return(f); } /* * free_sframes() -- Free all stack frames allocated to a trace record. */ void free_sframes(trace_t *t) { sframe_t *sf; t->nframes = 0; sf = t->frame; while(t->frame) { sf = (sframe_t *)kl_dequeue((element_t **)&t->frame); if (sf->srcfile) { kl_free_block((void *)sf->srcfile); } kl_free_block((void *)sf); } t->frame = (sframe_t *)NULL; } /* * alloc_trace_rec() -- Allocate stack trace header */ trace_t * alloc_trace_rec(int flags) { trace_t *t; if (flags & C_PERM) { t = (trace_t *)kl_alloc_block(sizeof(trace_t), K_PERM); } else { t = (trace_t *)kl_alloc_block(sizeof(trace_t), K_TEMP); } return(t); } /* * free_trace_rec() -- Free memory associated with stack trace header */ void free_trace_rec(trace_t *t) { int i; if (t->tsp) { kl_free_block(t->tsp); } for (i = 0; i < STACK_SEGMENTS; i++) { if (t->stack[i].ptr) { kl_free_block((void *)t->stack[i].ptr); } } free_sframes(t); kl_free_block((void *)t); } /* * clean_trace_rec() -- Clean up stack trace record without releasing * any of the allocated memory (except sframes). 
*/ void clean_trace_rec(trace_t *t) { int i; t->flags = 0; t->task = 0; if (t->tsp) { kl_free_block(t->tsp); t->tsp = 0; } t->stackcnt = 0; for (i = 0; i < STACK_SEGMENTS; i++) { if (t->stack[i].ptr) { t->stack[i].type = 0; t->stack[i].size = 0; t->stack[i].addr = (kaddr_t)NULL; kl_free_block((void *)t->stack[i].ptr); t->stack[i].ptr = (uaddr_t *)NULL; } } free_sframes(t); } /* * setup_trace_rec() */ int setup_trace_rec(kaddr_t saddr, kaddr_t task, int flag, trace_t *trace) { int aflag = K_TEMP; #ifdef REDHAT KL_ERROR = 0; #else kl_reset_error(); #endif if (flag & C_PERM) { aflag = K_PERM; } if (task) { trace->tsp = kl_alloc_block(TASK_STRUCT_SZ, aflag); if (kl_get_task_struct(task, 2, trace->tsp)) { kl_free_block(trace->tsp); trace->tsp = NULL; return(1); } } trace->stack[0].type = S_KERNELSTACK; trace->stack[0].size = STACK_SIZE; /* Get the base address of the stack */ trace->stack[0].addr = saddr - trace->stack[0].size; trace->stack[0].ptr = kl_alloc_block(STACK_SIZE, aflag); if (KL_ERROR) { clean_trace_rec(trace); return(1); } #ifdef REDHAT BCOPY(trace->bt->stackbuf, trace->stack[0].ptr, STACK_SIZE); #else GET_BLOCK(trace->stack[0].addr, STACK_SIZE, trace->stack[0].ptr); #endif if (KL_ERROR) { clean_trace_rec(trace); return(1); } return(0); } /* * valid_ra() */ int valid_ra(kaddr_t ra) { kaddr_t pc; if ((ra < KL_PAGE_OFFSET) || !kl_funcaddr(ra)) return(0); if ((pc = get_call_pc(ra))) return(1); return(0); } /* * valid_ra_function() * * Same as above, but ensure that it calls the funcname passed in. 
*/ int valid_ra_function(kaddr_t ra, char *funcname) { kaddr_t pc; if ((ra < KL_PAGE_OFFSET) || !kl_funcaddr(ra)) return(0); if (!(pc = get_call_pc(ra))) return(0); if (STREQ(x86_function_called_by(ra-5), funcname)) return(1); return(0); } #ifndef REDHAT #include #endif #define KERNEL_EFRAME 0 #define USER_EFRAME 1 #define KERNEL_EFRAME_SZ 13 /* no ss and esp */ #define USER_EFRAME_SZ 15 #ifdef REDHAT #undef __KERNEL_CS #undef __KERNEL_DS #undef __USER_CS #undef __USER_DS #define __KERNEL_CS 0x10 #define __KERNEL_DS 0x18 #define __USER_CS 0x23 #define __USER_DS 0x2B #endif /* * Check if the exception frame is of kernel or user type * Is checking only DS and CS values sufficient ? */ int eframe_type(uaddr_t *int_eframe) { ushort xcs, xds; xcs = (ushort)(int_eframe[INT_EFRAME_CS] & 0xffff); xds = (ushort)(int_eframe[INT_EFRAME_DS] & 0xffff); if ((xcs == __KERNEL_CS) && (xds == __KERNEL_DS)) return KERNEL_EFRAME; #ifdef REDHAT else if ((xcs == 0x60) && (xds == 0x68)) return KERNEL_EFRAME; else if ((xcs == 0x60) && (xds == 0x7b)) return KERNEL_EFRAME; else if (XEN() && (xcs == 0x61) && (xds == 0x7b)) return KERNEL_EFRAME; #endif else if ((xcs == __USER_CS) && (xds == __USER_DS)) return USER_EFRAME; #ifdef REDHAT else if ((xcs == 0x73) && (xds == 0x7b)) return USER_EFRAME; #endif return -1; } void print_eframe(FILE *ofp, uaddr_t *regs) { int type = eframe_type(regs); #ifdef REDHAT x86_dump_eframe_common(NULL, (ulong *)regs, (type == KERNEL_EFRAME)); #else fprintf(ofp, " ebx: %08lx ecx: %08lx edx: %08lx esi: %08lx\n", regs->ebx, regs->ecx, regs->edx, regs->esi); fprintf(ofp, " edi: %08lx ebp: %08lx eax: %08lx ds: %04x\n", regs->edi, regs->ebp, regs->eax, regs->xds & 0xffff); fprintf(ofp, " es: %04x eip: %08lx cs: %04x eflags: %08lx\n", regs->xes & 0xffff, regs->eip, regs->xcs & 0xffff, regs->eflags); if (type == USER_EFRAME) fprintf(ofp, " esp: %08lx ss: %04x\n", regs->esp, regs->xss); #endif } #ifdef REDHAT #define SEEK_VALID_RA() \ { \ while (!valid_ra(ra)) { \ if ((bp 
+ 4) < bt->stacktop) { \ bp += 4; \ ra = GET_STACK_ULONG(bp + 4); \ } else \ break; \ } \ } #define SEEK_VALID_RA_FUNCTION(F) \ { \ while (!valid_ra_function(ra, (F))) { \ if ((bp + 4) < bt->stacktop) { \ bp += 4; \ ra = GET_STACK_ULONG(bp + 4); \ } else \ break; \ } \ } #endif /* * Determine how much to increment the stack pointer to find the * exception frame associated with a generic "error_code" or "nmi" * exception. * * The incoming addr is that of the call to the generic error_code * or nmi exception handler function. Until later 2.6 kernels, the next * instruction had always been an "addl $8,%esp". However, with later * 2.6 kernels, that esp adjustment is no long valid, and there will be * an immediate "jmp" instruction. Returns 4 or 12, whichever is appropriate. * Cache the value the first time, and allow for future changes or additions. */ #define NMI_ADJ (0) #define ERROR_CODE_ADJ (1) #define EFRAME_ADJUSTS (ERROR_CODE_ADJ+1) static int eframe_adjust[EFRAME_ADJUSTS] = { 0 }; static int eframe_incr(kaddr_t addr, char *funcname) { instr_rec_t irp; kaddr_t next; int size, adj, val; if (STRNEQ(funcname, "nmi")) { adj = NMI_ADJ; val = eframe_adjust[NMI_ADJ]; } else if (strstr(funcname, "error_code")) { adj = ERROR_CODE_ADJ; val = eframe_adjust[ERROR_CODE_ADJ]; } else { adj = -1; val = 0; error(INFO, "unexpected exception frame marker: %lx (%s)\n", addr, funcname); } if (val) { console("eframe_incr(%lx, %s): eframe_adjust[%d]: %d\n", addr, funcname, adj, val); return val; } console("eframe_incr(%lx, %s): TBD:\n", addr, funcname); bzero(&irp, sizeof(irp)); irp.aflag = 1; irp.dflag = 1; if (!(size = get_instr_info(addr, &irp))) { if (CRASHDEBUG(1)) error(INFO, "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", addr, funcname, addr); return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 
4 : 12); } console(" addr: %lx size: %d opcode: 0x%x insn: \"%s\"\n", addr, size, irp.opcode, irp.opcodep->name); next = addr + size; bzero(&irp, sizeof(irp)); irp.aflag = 1; irp.dflag = 1; if (!(size = get_instr_info(next, &irp))) { if (CRASHDEBUG(1)) error(INFO, "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n", addr, funcname, next); return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12); } console(" next: %lx size: %d opcode: 0x%x insn: \"%s\"\n", next, size, irp.opcode, irp.opcodep->name); if (STREQ(irp.opcodep->name, "jmp")) val = 4; else val = 12; if (adj >= 0) eframe_adjust[adj] = val; return val; } static int xen_top_of_stack(struct bt_info *bt, char *funcname) { ulong stkptr, contents; for (stkptr = bt->stacktop-4; stkptr > bt->stackbase; stkptr--) { contents = GET_STACK_ULONG(stkptr); if (kl_funcname(contents) == funcname) return TRUE; if (valid_ra(contents)) break; } return FALSE; } static char * xen_funcname(struct bt_info *bt, ulong pc) { char *funcname = kl_funcname(pc); if (xen_top_of_stack(bt, funcname) && (pc >= symbol_value("hypercall")) && (pc < symbol_value("ret_from_intr"))) return "hypercall"; return funcname; } static int userspace_return(kaddr_t frame, struct bt_info *bt) { ulong esp0, eframe_addr; uint32_t *stkptr, *eframeptr; if (INVALID_MEMBER(task_struct_thread) || (((esp0 = MEMBER_OFFSET("thread_struct", "esp0")) < 0) && ((esp0 = MEMBER_OFFSET("thread_struct", "sp0")) < 0))) eframe_addr = bt->stacktop - SIZE(pt_regs); else eframe_addr = ULONG(tt->task_struct + OFFSET(task_struct_thread) + esp0) - SIZE(pt_regs); if (!INSTACK(eframe_addr, bt)) return FALSE; stkptr = (uint32_t *)(bt->stackbuf + ((ulong)frame - bt->stackbase)); eframeptr = (uint32_t *)(bt->stackbuf + (eframe_addr - bt->stackbase)); while (stkptr < eframeptr) { if (is_kernel_text_offset(*stkptr)) return FALSE; stkptr++; } return TRUE; } /* * find_trace() * * Given a starting pc (start_cp), starting stack pointer (start_sp), * and stack address, check to see if a valid 
trace is possible. A * trace is considered valid if no errors are encountered (bad PC, * bad SP, etc.) Certain errors are tolorated however. For example, * if the current stack frame is an exception frame (e.g., VEC_*), * go ahead and return success -- even if PC and SP obtained from * the exception frame are bad (a partial trace is better than no * trace).. * * Return zero if no valid trace was found. Otherwise, return the * number of frames found. If the C_ALL flag is passed in, then * return a trace even if it is a subtrace of a trace that was * previously found. * * Parameters: * * start_pc starting program counter * start_sp starting stack pointer * check_pc if non-NULL, check to see if check_pc/check_sp * check_sp are a sub-trace of trace beginning with spc/ssp * trace structure containing all trace related info (frames, * pages, page/frame counts, etc. * flags */ int find_trace( kaddr_t start_pc, kaddr_t start_sp, kaddr_t check_pc, kaddr_t check_sp, trace_t *trace, int flags) { int curstkidx = 0, frame_size, frame_type; kaddr_t sp, pc, ra, bp, sbase, saddr, func_addr; sframe_t *curframe; char *func_name; uaddr_t *sbp, *asp; #ifdef REDHAT struct syment *sp1; ulong offset; int flag; int interrupted_system_call = FALSE; struct bt_info *bt = trace->bt; uaddr_t *pt; curframe = NULL; #endif sbp = trace->stack[curstkidx].ptr; sbase = trace->stack[curstkidx].addr; saddr = sbase + trace->stack[curstkidx].size; #ifdef REDHAT bp = start_sp + get_framesize(start_pc, bt); #else bp = start_sp + get_framesize(start_pc); #endif if (KL_ERROR || (bp < sbase) || (bp >= saddr)) { return(0); } pc = start_pc; sp = start_sp; func_name = kl_funcname(pc); #ifdef REDHAT if (STREQ(func_name, "context_switch")) bt->flags |= BT_CONTEXT_SWITCH; #endif while (pc) { /* LOOP TRAP! Make sure we are not just looping on the * same frame forever. 
*/ if ((trace->nframes > 1) && (curframe->funcname == curframe->prev->funcname) && (curframe->sp == curframe->prev->sp)) { curframe->error = 1; #ifdef REDHAT bt->flags |= BT_LOOP_TRAP; #endif return(trace->nframes); } #ifdef REDHAT /* * If we wrap back to a lower stack location, we're cooked. */ if ((trace->nframes > 1) && (curframe->sp < curframe->prev->sp)) { curframe->error = 1; bt->flags |= BT_WRAP_TRAP; return(trace->nframes); } #endif /* Allocate space for a stack frame rec */ curframe = alloc_sframe(trace, flags); if (!(func_addr = kl_funcaddr(pc))) { curframe->error = KLE_BAD_PC; UPDATE_FRAME(0, pc, 0, 0, 0, 0, 0, 0, 0, 0); return(trace->nframes); } /* Check to see if check_pc/check_sp points to a sub-trace * of spc/ssp. If it does then don't return a trace (unless * C_ALL). Make sure we free the curframe block since we * wont be linking it in to the trace rec. */ if (check_pc && ((pc == check_pc) && (sp == check_sp))) { kl_free_block((void *)curframe); if (flags & C_ALL) { return(trace->nframes); } else { return(0); } } asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); #ifdef REDHAT if (XEN_HYPER_MODE()) { func_name = xen_funcname(bt, pc); if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall") || STREQ(func_name, "process_softirqs") || STREQ(func_name, "tracing_off") || STREQ(func_name, "page_fault") || STREQ(func_name, "handle_exception") || xen_top_of_stack(bt, func_name)) { UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); return(trace->nframes); } } else if (STREQ(closest_symbol(pc), "cpu_idle")) { func_name = kl_funcname(pc); UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); return(trace->nframes); } ra = GET_STACK_ULONG(bp + 4); /* * HACK: The get_framesize() function can return the proper * value -- as verified by disassembling the function -- but * in rare circumstances there's more to the stack frame than * meets the eye. 
Until I can figure out why, extra space * can be added here for any "known" anomolies. gcc version * restrictions are also added rather than assuming anything. * See framesize_modify() for kludgery. */ if (!valid_ra(ra)) { char *funcname; struct framesize_cache *fcp; funcname = kl_funcname(pc); FRAMESIZE_CACHE_VALIDATE(pc, (void **)&fcp); bp += fcp->bp_adjust; ra = GET_STACK_ULONG(bp + 4); /* * This anomoly would be caught by the recovery * speculation, but since we know it's an issue * just catch it here first. */ if (STREQ(funcname, "schedule") && (THIS_GCC_VERSION >= GCC(3,2,3))) { SEEK_VALID_RA(); /* * else FRAMESIZE_VALIDATE has been turned on */ } else if (fcp->flags & FRAMESIZE_VALIDATE) { SEEK_VALID_RA_FUNCTION(funcname); /* * Generic speculation continues the search for * a valid RA at a higher stack address. */ } else if ((bt->flags & BT_SPECULATE) && !STREQ(funcname, "context_switch") && !STREQ(funcname, "die") && !(bt->frameptr && ((bp+4) < bt->frameptr))) SEEK_VALID_RA(); } #else kl_get_kaddr(bp + 4, &ra); #endif /* Make sure that the ra we have is a valid one. If not * then back up in the frame, word by word, until we find * one that is good. 
*/ if (!valid_ra(ra)) { int i; i = ((bp - sp + 8) / 4); while (i) { bp -= 4; #ifdef REDHAT ra = GET_STACK_ULONG(bp + 4); #else kl_get_kaddr(bp + 4, &ra); #endif if (valid_ra(ra)) { break; } i--; } if (i == 0) { #ifdef REDHAT if (interrupted_system_call) { if ((sp1 = x86_is_entry_tramp_address (pc, &offset))) pc = sp1->value + offset; flag = EX_FRAME; } else { if (!XEN_HYPER_MODE() && !is_kernel_thread(bt->task) && (bt->stacktop == machdep->get_stacktop(bt->task))) { if (((ulong)(bp+4) + SIZE(pt_regs)) > bt->stacktop) flag = INCOMPLETE_EX_FRAME; else if ((sp1 = eframe_label(NULL, pc)) && STREQ(sp1->name, "system_call")) flag = EX_FRAME|SET_EX_FRAME_ADDR; else if (STREQ(closest_symbol(pc), "ret_from_fork")) flag = EX_FRAME|SET_EX_FRAME_ADDR; else if (userspace_return(bp, bt)) flag = EX_FRAME|SET_EX_FRAME_ADDR; else { curframe->error = KLE_BAD_RA; flag = 0; } } else { curframe->error = KLE_BAD_RA; flag = 0; } } #else curframe->error = KLE_BAD_RA; #endif UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, 0, flag); return(trace->nframes); } } UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, 0, 0); curframe->frame_size = curframe->fp - curframe->sp + 4; /* Gather starting information for the next frame */ pc = get_call_pc(ra); #ifdef USE_FRAMEPTRS kl_get_kaddr(bp, &bp); if (KL_ERROR) { curframe->error = 2; return(trace->nframes); } #else /* It's possible for get_framesize() to return a size * that is larger than the actual frame size (because * all it does is count the push, pop, addl, and subl * instructions that effect the SP). If we are real near * the top of the stack, this might cause bp to overflow. * This will be fixed above, but we need to bring bp * back into the legal range so we don't crap out * before we can get to it... 
*/ #ifdef REDHAT frame_size = get_framesize(pc, bt); interrupted_system_call = FALSE; #else frame_size = get_framesize(pc); #endif if ((curframe->fp + frame_size) >= saddr) { bp = saddr - 4; } else { bp = curframe->fp + frame_size; } #endif func_name = kl_funcname(pc); if (func_name && !XEN_HYPER_MODE()) { if (strstr(func_name, "kernel_thread")) { ra = 0; bp = saddr - 4; asp = (uaddr_t*) ((uaddr_t)sbp + (STACK_SIZE - 12)); curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 16, 0); return(trace->nframes); } else if (strstr(func_name, "is386")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, 0); return(trace->nframes); } else if (STREQ(func_name, "ret_from_fork")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, EX_FRAME|SET_EX_FRAME_ADDR); return(trace->nframes); #ifdef REDHAT } else if (STREQ(func_name, "cpu_idle")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, 0); return(trace->nframes); } else if (strstr(func_name, "system_call") || strstr(func_name, "sysenter_past_esp") || eframe_label(func_name, pc) || strstr(func_name, "syscall_call") || strstr(func_name, "signal_return") || strstr(func_name, "reschedule") || kernel_entry_from_user_space(curframe, bt)) { #else } else if (strstr(func_name, "system_call")) { #endif /* * user exception frame, kernel stack ends * here. 
*/ bp = saddr - 4; sp = curframe->fp + 4; #ifdef REDHAT ra = GET_STACK_ULONG(bp-16); #else kl_get_kaddr(bp-16, &ra); #endif curframe = alloc_sframe(trace, flags); asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, (bp - sp + 4), EX_FRAME); return(trace->nframes); #ifdef REDHAT } else if (strstr(func_name, "error_code") || STREQ(func_name, "nmi_stack_correct") || STREQ(func_name, "nmi")) { #else } else if (strstr(func_name, "error_code")) { #endif /* an exception frame */ sp = curframe->fp + eframe_incr(pc, func_name); bp = sp + (KERNEL_EFRAME_SZ-1)*4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); curframe = alloc_sframe(trace, flags); ra = asp[INT_EFRAME_EIP]; frame_type = eframe_type(asp); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, (bp - sp + 4), EX_FRAME); /* prepare for next kernel frame, if present */ if (frame_type == KERNEL_EFRAME) { pc = asp[INT_EFRAME_EIP]; sp = curframe->fp+4; #ifdef REDHAT bp = sp + get_framesize(pc, bt); #else bp = sp + get_framesize(pc); #endif func_name = kl_funcname(pc); continue; } else { return(trace->nframes); } } else if (is_task_active(bt->task) && (strstr(func_name, "call_do_IRQ") || strstr(func_name, "common_interrupt") || strstr(func_name, "reboot_interrupt") || strstr(func_name, "call_function_interrupt"))) { /* Interrupt frame */ sp = curframe->fp + 4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); frame_type = eframe_type(asp); if (frame_type == KERNEL_EFRAME) bp = curframe->fp+(KERNEL_EFRAME_SZ-1)*4; else bp = curframe->fp+(USER_EFRAME_SZ-1)*4; curframe = alloc_sframe(trace, flags); ra = asp[INT_EFRAME_EIP]; UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, curframe->fp - curframe->sp+4, EX_FRAME); /* prepare for next kernel frame, if present */ if (frame_type == KERNEL_EFRAME) { sp = curframe->fp + 4; pc = asp[INT_EFRAME_EIP]; #ifdef REDHAT bp = sp + get_framesize(pc, bt); #else bp = sp + get_framesize(pc); #endif 
func_name = kl_funcname(pc); #ifdef REDHAT /* interrupted system_call entry */ if (STREQ(func_name, "system_call")) interrupted_system_call = TRUE; #endif continue; } else { return trace->nframes; } } } if (func_name && XEN_HYPER_MODE()) { if (STREQ(func_name, "continue_nmi") || STREQ(func_name, "vmx_asm_vmexit_handler") || STREQ(func_name, "common_interrupt") || STREQ(func_name, "handle_nmi_mce") || STREQ(func_name, "deferred_nmi")) { /* Interrupt frame */ sp = curframe->fp + 4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); bp = curframe->fp + (12 * 4); curframe = alloc_sframe(trace, flags); ra = *(asp + 9); UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, curframe->fp - curframe->sp+4, 12 * 4); /* contunue next frame */ pc = ra; sp = curframe->fp + 4; bp = sp + get_framesize(pc, bt); func_name = kl_funcname(pc); if (!func_name) return trace->nframes; continue; } } /* * Check for hypervisor_callback from user-space. */ if ((bt->flags & BT_XEN_STOP_THIS_CPU) && bt->tc->mm_struct && STREQ(kl_funcname(curframe->pc), "hypervisor_callback")) { pt = curframe->asp+1; if (eframe_type(pt) == USER_EFRAME) { if (program_context.debug >= 1) /* pc above */ error(INFO, "hypervisor_callback from user space\n"); curframe->asp++; curframe->flag |= EX_FRAME; return(trace->nframes); } } /* Make sure our next frame pointer is valid (in the stack). 
 */
		if ((bp < sbase) || (bp >= saddr)) {
			curframe->error = 3;
			return(trace->nframes);
		}
		sp = curframe->fp + 4;
	}
	return(trace->nframes);
}

/*
 * kernel_entry_from_user_space()
 *
 * Return TRUE if curframe sits at a user-space entry into the kernel:
 * either curframe->fp+4 plus a pt_regs structure lands exactly on the
 * task's stack top, or userspace_return() recognizes the data at
 * curframe->fp+4 as a user-mode exception frame.  Kernel threads never
 * enter from user space, so they always yield FALSE.
 */
static int
kernel_entry_from_user_space(sframe_t *curframe, struct bt_info *bt)
{
	if (is_kernel_thread(bt->tc->task))
		return FALSE;

	/* NOTE(review): second is_kernel_thread() test below is redundant --
	 * it already returned FALSE above. */
	if (((curframe->fp + 4 + SIZE(pt_regs)) == GET_STACKTOP(bt->task)) &&
	    !is_kernel_thread(bt->tc->task))
		return TRUE;
	else if (userspace_return(curframe->fp+4, bt))
		return TRUE;
	else
		return FALSE;
}

#ifndef REDHAT
/*
 * pc_offset()
 *
 * Return the byte offset of pc into its enclosing function, or -1
 * if pc does not resolve to a known function.
 */
int
pc_offset(kaddr_t pc)
{
	kaddr_t func_addr;

	if ((func_addr = kl_funcaddr(pc))) {
		return(pc - func_addr);
	}
	return(-1);
}
#endif /* !REDHAT */

/*
 * dump_stack_frame()
 *
 * Dump the raw stack words of a single frame, four 32-bit words per
 * output line, each line prefixed with its starting stack address.
 */
void
dump_stack_frame(trace_t *trace, sframe_t *curframe, FILE *ofp)
{
	int i, first_time = 1;
	kaddr_t sp;
	uaddr_t *asp;
	char buf[BUFSIZE];

	sp = curframe->sp;
	asp = curframe->asp;

	/* frame_size is in bytes; walk it one 4-byte word at a time */
	for (i = 0; i < curframe->frame_size / 4; i++) {
		if (!(i % 4)) {
			/* start of a new output line */
			if (first_time) {
				first_time = 0;
#ifdef REDHAT
				fprintf(ofp, " %x: %s ", sp,
					format_stack_entry(trace->bt, buf, *asp++, 0));
#else
				fprintf(ofp, " %x: %08x ", sp, *asp++);
#endif
			} else {
#ifdef REDHAT
				fprintf(ofp, "\n %x: ", sp);
#else
				fprintf(ofp, "\n %x: ", sp);
#endif
				fprintf(ofp, "%s ",
					format_stack_entry(trace->bt, buf, *asp++, 0));
			}
			sp += 16;	/* four 4-byte words per output line */
		} else {
			fprintf(ofp, "%s ",
				format_stack_entry(trace->bt, buf, *asp++, 0));
		}
	}
	if (curframe->frame_size) {
#ifdef REDHAT
		fprintf(ofp, "\n");
#else
		fprintf(ofp, "\n\n");
#endif
	}
}

/*
 * eframe_address()
 *
 * Return the stack-buffer address of the exception frame to display for
 * this stack frame.  Normally that is simply frmp->asp; the rare
 * SET_EX_FRAME_ADDR case recalculates it from the thread's esp0/sp0.
 */
static uaddr_t *
eframe_address(sframe_t *frmp, struct bt_info *bt)
{
	ulong esp0, pt;

	/* fall back to frmp->asp unless the flag is set and the
	 * thread_struct esp0/sp0 member offset can be determined */
	if (!(frmp->flag & SET_EX_FRAME_ADDR) ||
	    INVALID_MEMBER(task_struct_thread) ||
	    (((esp0 = MEMBER_OFFSET("thread_struct", "esp0")) < 0) &&
	     ((esp0 = MEMBER_OFFSET("thread_struct", "sp0")) < 0)))
		return frmp->asp;

	/*
	 * Work required in rarely-seen SET_EX_FRAME_ADDR circumstances.
	 */
*/ pt = ULONG(tt->task_struct + OFFSET(task_struct_thread) + esp0) - SIZE(pt_regs); if (!INSTACK(pt, bt)) return frmp->asp; return ((uint32_t *)(bt->stackbuf + (pt - bt->stackbase))); } /* * print_trace() */ void print_trace(trace_t *trace, int flags, FILE *ofp) { sframe_t *frmp; #ifdef REDHAT kaddr_t fp = 0; kaddr_t last_fp ATTRIBUTE_UNUSED; kaddr_t last_pc, next_fp, next_pc; struct bt_info *bt; bt = trace->bt; last_fp = last_pc = next_fp = next_pc = 0; #else int offset; #endif if ((frmp = trace->frame)) { do { #ifdef REDHAT if (trace->bt->flags & BT_LOOP_TRAP) { if (frmp->prev && frmp->error && (frmp->pc == frmp->prev->pc) && (frmp->fp == frmp->prev->fp)) goto print_trace_error; } if ((trace->bt->flags & BT_WRAP_TRAP) && frmp->error) goto print_trace_error; /* * We're guaranteed to run into an error when unwinding * a hard or soft IRQ stack, so just bail with success. */ if ((frmp->next != trace->frame) && frmp->next->error && (bt->flags & (BT_LOOP_TRAP|BT_WRAP_TRAP)) && (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) return; if ((frmp->level == 0) && (bt->flags & BT_XEN_STOP_THIS_CPU)) { print_stack_entry(trace->bt, 0, trace->bt->stkptr, symbol_value("stop_this_cpu"), value_symbol(symbol_value("stop_this_cpu")), frmp, ofp); } print_stack_entry(trace->bt, (trace->bt->flags & (BT_BUMP_FRAME_LEVEL|BT_XEN_STOP_THIS_CPU)) ? frmp->level + 1 : frmp->level, fp ? (ulong)fp : trace->bt->stkptr, (ulong)frmp->pc, frmp->funcname, frmp, ofp); if (trace->bt->flags & BT_LOOP_TRAP) { last_fp = fp ? 
(ulong)fp : trace->bt->stkptr; last_pc = frmp->pc; } fp = frmp->fp; #else fprintf(ofp, "%2d %s", frmp->level, frmp->funcname); offset = pc_offset(frmp->pc); if (offset > 0) { fprintf(ofp, "+%d", offset); } else if (offset < 0) { fprintf(ofp, "+"); } fprintf(ofp, " [0x%x]\n", frmp->pc); #endif if (frmp->flag & EX_FRAME) { if (CRASHDEBUG(1)) fprintf(ofp, " EXCEPTION FRAME: %lx\n", (unsigned long)frmp->sp); print_eframe(ofp, eframe_address(frmp, bt)); } #ifdef REDHAT if (CRASHDEBUG(1) && (frmp->flag & INCOMPLETE_EX_FRAME)) { fprintf(ofp, " INCOMPLETE EXCEPTION FRAME:\n"); fprintf(ofp, " user stacktop: %lx frame #%d: %lx (+pt_regs: %lx)\n", bt->stacktop, frmp->level, (ulong)frmp->fp, (ulong)frmp->fp + SIZE(pt_regs)); } if (trace->bt->flags & BT_FULL) { fprintf(ofp, " [RA: %x SP: %x FP: %x " "SIZE: %d]\n", frmp->ra, frmp->sp, frmp->fp, frmp->frame_size); dump_stack_frame(trace, frmp, ofp); } #else if (flags & C_FULL) { fprintf(ofp, "\n"); fprintf(ofp, " RA=0x%x, SP=0x%x, FP=0x%x, " "SIZE=%d\n\n", frmp->ra, frmp->sp, frmp->fp, frmp->frame_size); #ifdef FRMSIZE_DBG fprintf(ofp, "\n FRAMESIZE=%d\n\n", #ifdef REDHAT get_framesize(frmp->pc, bt)); #else get_framesize(frmp->pc)); #endif #endif dump_stack_frame(trace, frmp, ofp); } #endif /* !REDHAT */ if (frmp->error) { #ifdef REDHAT print_trace_error: KL_ERROR = KLE_PRINT_TRACE_ERROR; if (CRASHDEBUG(1) || trace->bt->debug) fprintf(ofp, "TRACE ERROR: 0x%llx %llx\n", frmp->error, trace->bt->flags); if (trace->bt->flags & BT_WRAP_TRAP) return; #else fprintf(ofp, "TRACE ERROR: 0x%llx\n", frmp->error); #endif } frmp = frmp->next; } while (frmp != trace->frame); } } /* * trace_banner() */ void trace_banner(FILE *ofp) { fprintf(ofp, "====================================================" "============\n"); } /* * task_trace() */ int #ifdef REDHAT lkcd_x86_back_trace(struct bt_info *bt, int flags, FILE *ofp) #else task_trace(kaddr_t task, int flags, FILE *ofp) #endif { void *tsp; kaddr_t saddr, eip, esp; trace_t *trace; #ifdef REDHAT 
int nframes = 0; kaddr_t task = bt->task; KL_ERROR = 0; tsp = NULL; if (bt->flags & BT_FRAMESIZE_DEBUG) return(framesize_debug(bt, ofp)); if (kt->flags & RA_SEEK) bt->flags |= BT_SPECULATE; if (XENDUMP_DUMPFILE() && XEN() && is_task_active(bt->task) && STREQ(kl_funcname(bt->instptr), "stop_this_cpu")) { /* * bt->instptr of "stop_this_cpu" is not a return * address -- replace it with the actual return * address found at the bt->stkptr location. */ if (readmem((ulong)bt->stkptr, KVADDR, &eip, sizeof(ulong), "xendump eip", RETURN_ON_ERROR)) bt->instptr = eip; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "replacing stop_this_cpu with %s\n", kl_funcname(bt->instptr)); } if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && is_task_active(bt->task) && !(kt->xen_flags & XEN_SUSPEND) && STREQ(kl_funcname(bt->instptr), "schedule")) { /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the smp_call_function_interrupt return * address is found. */ saddr = bt->stkptr; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { bt->instptr = eip; bt->stkptr = saddr; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "switch schedule to smp_call_function_interrupt\n"); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && is_task_active(bt->task) && (kt->xen_flags & XEN_SUSPEND) && STREQ(kl_funcname(bt->instptr), "schedule")) { int framesize = 0; /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the hypercall_page() return address is * found, and fix up its framesize as we go. 
*/ saddr = bt->stacktop; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "xen_idle")) framesize += sizeof(ulong); else if (framesize) framesize += sizeof(ulong); if (STREQ(kl_funcname(eip), "hypercall_page")) { int framesize = 24; bt->instptr = eip; bt->stkptr = saddr; if (CRASHDEBUG(1)) error(INFO, "switch schedule to hypercall_page (framesize: %d)\n", framesize); FRAMESIZE_CACHE_ENTER(eip, &framesize); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (XENDUMP_DUMPFILE() && XEN() && !is_idle_thread(bt->task) && is_task_active(bt->task) && STREQ(kl_funcname(bt->instptr), "schedule")) { /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the smp_call_function_interrupt return * address is found. */ saddr = bt->stacktop; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { bt->instptr = eip; bt->stkptr = saddr; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "switch schedule to smp_call_function_interrupt\n"); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (!verify_back_trace(bt) && !recoverable(bt, ofp) && !BT_REFERENCE_CHECK(bt)) error(INFO, "cannot resolve stack trace:\n"); if (BT_REFERENCE_CHECK(bt)) return(0); #endif if (!XEN_HYPER_MODE()) { if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { return(1); } if (kl_get_task_struct(task, 2, tsp)) { kl_free_block(tsp); return(1); } } trace = (trace_t *)alloc_trace_rec(C_TEMP); if (!trace) { #ifdef REDHAT error(INFO, "Could not alloc trace rec!\n"); #else fprintf(KL_ERRORFP, "Could not alloc trace rec!\n"); #endif return(1); } else { #ifdef REDHAT saddr = kl_kernelstack(bt->stackbase); eip = bt->instptr; esp = bt->stkptr; trace->bt = bt; #else saddr = kl_kernelstack(task); if (kl_smp_dumptask(task)) { eip = kl_dumpeip(task); esp = 
kl_dumpesp(task); } else { if (LINUX_2_2_X(KL_LINUX_RELEASE)) { eip = KL_UINT(K_PTR(tsp, "task_struct", "tss"), "thread_struct", "eip"); esp = KL_UINT(K_PTR(tsp, "task_struct", "tss"), "thread_struct", "esp"); } else { eip = KL_UINT( K_PTR(tsp, "task_struct", "thread"), "thread_struct", "eip"); esp = KL_UINT( K_PTR(tsp, "task_struct", "thread"), "thread_struct", "esp"); } } #endif if (esp < KL_PAGE_OFFSET || eip < KL_PAGE_OFFSET) { #ifdef REDHAT error(INFO, "Task in user space -- no backtrace\n"); #else fprintf(KL_ERRORFP, "Task in user space, No backtrace\n"); #endif return 1; } setup_trace_rec(saddr, 0, 0, trace); if (KL_ERROR) { #ifdef REDHAT error(INFO, "Error setting up trace rec!\n"); #else fprintf(KL_ERRORFP, "Error setting up trace rec!\n"); #endif free_trace_rec(trace); return(1); } #ifdef REDHAT nframes = find_trace(eip, esp, 0, 0, trace, 0); #else find_trace(eip, esp, 0, 0, trace, 0); trace_banner(ofp); fprintf(ofp, "STACK TRACE FOR TASK: 0x%x", task); if (KL_TYPEINFO()) { fprintf(ofp, "(%s)\n\n", (char *)K_PTR(tsp, "task_struct", "comm")); } else { fprintf(ofp, "(%s)\n\n", (char *)K_PTR(tsp, "task_struct", "comm")); } #endif print_trace(trace, flags, ofp); } if (!XEN_HYPER_MODE()) kl_free_block(tsp); free_trace_rec(trace); #ifdef REDHAT if (KL_ERROR == KLE_PRINT_TRACE_ERROR) { handle_trace_error(bt, nframes, ofp); return(1); } #endif return(0); } #ifdef REDHAT /* * Run find_trace() and check for any errors encountered. 
*/ static int verify_back_trace(struct bt_info *bt) { void *tsp; kaddr_t saddr, eip, esp; int errcnt; trace_t *trace; sframe_t *frmp; errcnt = 0; KL_ERROR = 0; tsp = NULL; if (!XEN_HYPER_MODE()) { if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) return FALSE; if (kl_get_task_struct(bt->task, 2, tsp)) { kl_free_block(tsp); return FALSE; } } trace = (trace_t *)alloc_trace_rec(C_TEMP); if (!trace) return FALSE; saddr = kl_kernelstack(bt->stackbase); eip = bt->instptr; esp = bt->stkptr; trace->bt = bt; if (esp < KL_PAGE_OFFSET || eip < KL_PAGE_OFFSET) return FALSE; setup_trace_rec(saddr, 0, 0, trace); if (KL_ERROR) { free_trace_rec(trace); return FALSE; } find_trace(eip, esp, 0, 0, trace, 0); if ((frmp = trace->frame)) { do { if (frmp->error) { /* * We're guaranteed to run into an error when * unwinding and IRQ stack, so bail out without * reporting the error. */ if ((bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) && (bt->flags & (BT_LOOP_TRAP|BT_WRAP_TRAP))) break; errcnt++; if (!(bt->flags & BT_SPECULATE) && !bt->frameptr) bt->frameptr = frmp->fp; } if (BT_REFERENCE_CHECK(bt)) do_bt_reference_check(bt, frmp); frmp = frmp->next; } while (frmp != trace->frame); } if (!XEN_HYPER_MODE()) kl_free_block(tsp); free_trace_rec(trace); return (errcnt ? FALSE : TRUE); } /* * Check a frame for a requested reference. */ static void do_bt_reference_check(struct bt_info *bt, sframe_t *frmp) { int type; struct syment *sp; sp = frmp->prev && STREQ(frmp->funcname, "error_code") ? 
x86_jmp_error_code((ulong)frmp->prev->pc) : NULL; switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(kl_funcname(frmp->pc), bt->ref->str) || (sp && STREQ(sp->name, bt->ref->str))) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if ((bt->ref->hexval == frmp->pc) || (sp && (bt->ref->hexval == sp->value))) bt->ref->cmdflags |= BT_REF_FOUND; if (frmp->flag & EX_FRAME) { type = eframe_type(frmp->asp); x86_dump_eframe_common(bt, (ulong *)frmp->asp, (type == KERNEL_EFRAME)); } break; } } /* * This function is a repository for "known" find_trace() failures that * can be "fixed" on the fly. * * Currently the routine only deals with BT_LOOP_TRAP/BT_WRAP_TRAP errors * where get_framesize() leaves the bp in an invalid location, where * where schedule() coming from schedule_timeout() is interrupted by a * false return address in between, those where the cpu_idle() trail * cannot be followed, and where the functions called by kernel_thread() * can't find their way back to kernel_thread(). As new fixable trace * instances are discovered, add them in. * * NOTE: the schedule() BT_LOOP_TRAP may have been subsequently fixed * by the get_framesize() adjustment for schedule(), but it's worth * keeping it around if a new schedule framesize anomoly pops up in * the future. 
*/ static int recoverable(struct bt_info *bt, FILE *ofp) { ulong esp, eip; sframe_t sframe; struct stack_hook *hp; struct bt_info btloc; ulong kernel_thread; int calls_schedule; if (!(kt->flags & NO_RA_SEEK)) { BCOPY(bt, &btloc, sizeof(struct bt_info)); btloc.flags &= ~(ulonglong)BT_ERROR_MASK; btloc.flags |= BT_SPECULATE; if (verify_back_trace(&btloc)) { bt->flags &= ~(ulonglong)BT_ERROR_MASK; bt->flags |= BT_SPECULATE; if (CRASHDEBUG(1) || bt->debug) error(INFO, "recovered back trace with RA seek\n"); return TRUE; } } if (!gather_text_list(bt) || !STREQ(kl_funcname(bt->instptr), "schedule")) return FALSE; if (!is_idle_thread(bt->task) && !(bt->flags & BT_ERROR_MASK)) return FALSE; esp = eip = 0; calls_schedule = FALSE; kernel_thread = 0; for (hp = bt->textlist; hp->esp; hp++) { if (STREQ(kl_funcname(hp->eip), "kernel_thread")) { kernel_thread = hp->eip; continue; } if (!calls_schedule && STREQ(x86_function_called_by(hp->eip-5), "schedule")) calls_schedule = TRUE; if (STREQ(kl_funcname(hp->eip), "schedule_timeout")) { esp = hp->esp; eip = hp->eip; break; } if (STREQ(kl_funcname(hp->eip), "cpu_idle") && (bt->tc->pid == 0)) { esp = hp->esp; eip = hp->eip; bt->flags |= BT_CPU_IDLE; for ( ; BT_REFERENCE_CHECK(bt) && hp->esp; hp++) { if (STREQ(kl_funcname(hp->eip), "rest_init") || STREQ(kl_funcname(hp->eip), "start_kernel")) { BZERO(&sframe, sizeof(sframe_t)); sframe.pc = hp->eip; do_bt_reference_check(bt, &sframe); } } break; } } BCOPY(bt, &btloc, sizeof(struct bt_info)); btloc.flags &= ~(ulonglong)BT_ERROR_MASK; if (esp && eip) { btloc.instptr = eip; btloc.stkptr = esp; if (verify_back_trace(&btloc)) { if (CRASHDEBUG(1) || bt->debug) error(INFO, "recovered stack trace:\n"); if (!BT_REFERENCE_CHECK(bt)) fprintf(ofp, " #0 [%08lx] %s at %lx\n", bt->stkptr, kl_funcname(bt->instptr), bt->instptr); bt->instptr = eip; bt->stkptr = esp; bt->flags &= ~(ulonglong)BT_ERROR_MASK; bt->flags |= BT_BUMP_FRAME_LEVEL; FREEBUF(bt->textlist); return TRUE; } if (bt->flags & BT_CPU_IDLE) 
{ if (CRASHDEBUG(1) || bt->debug) error(INFO, "recovered stack trace:\n"); return TRUE; } } if (kernel_thread && calls_schedule && is_kernel_thread(bt->tc->task)) { if (CRASHDEBUG(1) || bt->debug) error(INFO, "recovered stack trace:\n"); if (BT_REFERENCE_CHECK(bt)) { BZERO(&sframe, sizeof(sframe_t)); sframe.pc = kernel_thread; do_bt_reference_check(bt, &sframe); } bt->flags |= BT_KERNEL_THREAD; return TRUE; } return FALSE; } /* * If a trace is recoverable from this point finish it here. Otherwise, * if a back trace fails and is unrecoverable, dump the text symbols along * with any possible exception frames that can be found on the stack. */ static void handle_trace_error(struct bt_info *bt, int nframes, FILE *ofp) { int cnt, level; struct stack_hook *hp; if (CRASHDEBUG(2) || (bt->debug >= 2)) { for (hp = bt->textlist; hp->esp; hp++) { char *func; if ((func = x86_function_called_by(hp->eip-5))) fprintf(ofp, "%lx %s calls %s\n", hp->eip, kl_funcname(hp->eip), func); } } if (bt->flags & BT_CPU_IDLE) { for (hp = bt->textlist, level = 2; hp->esp; hp++) { if (STREQ(kl_funcname(hp->eip), "rest_init") || STREQ(kl_funcname(hp->eip), "start_kernel")) print_stack_entry(bt, level++, hp->esp, hp->eip, kl_funcname(hp->eip), NULL, ofp); } FREEBUF(bt->textlist); return; } if (bt->flags & BT_KERNEL_THREAD) { for (hp = bt->textlist; hp->esp; hp++) { if (STREQ(kl_funcname(hp->eip), "kernel_thread")) print_stack_entry(bt, nframes-1, hp->esp, hp->eip, "kernel_thread", NULL, ofp); } FREEBUF(bt->textlist); return; } error(INFO, "text symbols on stack:\n"); bt->flags |= BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK; back_trace(bt); if (!XEN_HYPER_MODE()) { bt->flags = BT_EFRAME_COUNT; if ((cnt = machdep->eframe_search(bt))) { error(INFO, "possible exception frame%s:\n", cnt > 1 ? "s" : ""); bt->flags &= ~(ulonglong)BT_EFRAME_COUNT; machdep->eframe_search(bt); } } } /* * Print a stack entry, and its line number if requested. 
static void
print_stack_entry(struct bt_info *bt, int level, ulong esp, ulong eip,
	char *funcname, sframe_t *frmp, FILE *ofp)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	struct syment *sp;
	struct load_module *lm;

	/*
	 * Annotate trampoline entries: an "error_code" frame shows the
	 * exception handler it jumps to, and a text-lock section entry
	 * shows the function the out-of-line locked code belongs to.
	 */
	if (frmp && frmp->prev && STREQ(frmp->funcname, "error_code") &&
	    (sp = x86_jmp_error_code((ulong)frmp->prev->pc)))
		sprintf(buf1, " (via %s)", sp->name);
	else if (frmp && (STREQ(frmp->funcname, "stext_lock") ||
	    STRNEQ(frmp->funcname, ".text.lock")) &&
	    (sp = x86_text_lock_jmp(eip, NULL)))
		sprintf(buf1, " (via %s)", sp->name);
	else
		buf1[0] = NULLCHAR;

	/* map mid-entry-code labels back to the real entry point name */
	if ((sp = eframe_label(funcname, eip)))
		funcname = sp->name;

	fprintf(ofp, "%s#%d [%8lx] %s%s at %lx",
		level < 10 ? " " : "", level, esp,
		funcname_display(funcname, eip, bt, buf2),
		strlen(buf1) ? buf1 : "", eip);
	/* tag module text with the module's name */
	if (module_symbol(eip, NULL, &lm, NULL, 0))
		fprintf(ofp, " [%s]", lm->mod_name);
	fprintf(ofp, "\n");

	/* "bt -l": append the source file/line for this eip */
	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(eip, buf1, FALSE);
		if (strlen(buf1))
			fprintf(ofp, " %s\n", buf1);
	}
}

/*
 * The new process accounting stuff installs a label between system_call and
 * ret_from_sys_call, confusing the code that recognizes exception frame
 * symbols.  This function has been put in place to catch that anomaly, as
 * well as serving as a template for any future labels that get placed in the
 * kernel entry point code.  It returns the syment of the "real" kernel entry
 * point.
 */
#define EFRAME_LABELS 10

/*
 * Lazily-initialized cache of the syscall/tracesys/sysenter entry-code
 * boundary symbols, plus up to EFRAME_LABELS previously-seen label
 * addresses inside each range.
 */
static struct eframe_labels {
	int init;			/* TRUE once symbol lookups are done */
	ulong syscall_labels[EFRAME_LABELS];
	struct syment *syscall;
	struct syment *syscall_end;
	ulong tracesys_labels[EFRAME_LABELS];
	struct syment *tracesys;
	struct syment *tracesys_exit;
	ulong sysenter_labels[EFRAME_LABELS];
	struct syment *sysenter;
	struct syment *sysenter_end;
} eframe_labels = { 0 };

static struct syment *
eframe_label(char *funcname, ulong eip)
{
	int i;
	struct eframe_labels *efp;
	struct syment *sp;

	if (XEN_HYPER_MODE())
		return NULL;	/* ODA: need support ?
*/ efp = &eframe_labels; if (!efp->init) { if (!(efp->syscall = symbol_search("system_call"))) error(WARNING, "\"system_call\" symbol does not exist\n"); if ((sp = symbol_search("ret_from_sys_call"))) efp->syscall_end = sp; else if ((sp = symbol_search("syscall_badsys"))) efp->syscall_end = sp; else error(WARNING, "neither \"ret_from_sys_call\" nor \"syscall_badsys\" symbols exist\n"); if (efp->syscall) { efp->tracesys = symbol_search("tracesys"); efp->tracesys_exit = symbol_search("tracesys_exit"); } if ((efp->sysenter = symbol_search("sysenter_entry")) || (efp->sysenter = symbol_search("ia32_sysenter_target"))) { if ((sp = symbol_search("sysexit_ret_end_marker"))) efp->sysenter_end = sp; else if (THIS_KERNEL_VERSION >= LINUX(2,6,32)) { if ((sp = symbol_search("sysexit_audit")) || (sp = symbol_search("sysenter_exit"))) efp->sysenter_end = next_symbol(NULL, sp); else error(WARNING, "cannot determine end of %s function\n", efp->sysenter->name); } else if ((sp = symbol_search("system_call"))) efp->sysenter_end = sp; else error(WARNING, "neither \"sysexit_ret_end_marker\" nor \"system_call\" symbols exist\n"); } efp->init = TRUE; } /* * First search for the currently-known system_call labels. */ for (i = 0; (i < EFRAME_LABELS) && efp->syscall_labels[i]; i++) { if (efp->syscall_labels[i] == eip) return efp->syscall; } for (i = 0; (i < EFRAME_LABELS) && efp->tracesys_labels[i]; i++) { if (efp->tracesys_labels[i] == eip) return efp->syscall; } for (i = 0; (i < EFRAME_LABELS) && efp->sysenter_labels[i]; i++) { if (efp->sysenter_labels[i] == eip) return efp->sysenter; } /* * If the eip fits in any of the label arrays, try to store it, * but always return the real function it's referencing. 
*/ if (efp->syscall && efp->syscall_end) { if (((eip >= efp->syscall->value) && (eip < efp->syscall_end->value))) { for (i = 0; i < EFRAME_LABELS; i++) if (!efp->syscall_labels[i]) efp->syscall_labels[i] = eip; return efp->syscall; } } if (efp->tracesys && efp->tracesys_exit) { if (((eip >= efp->tracesys->value) && (eip < efp->tracesys_exit->value))) { for (i = 0; i < EFRAME_LABELS; i++) if (!efp->tracesys_labels[i]) efp->tracesys_labels[i] = eip; return efp->syscall; } } if (efp->sysenter && efp->sysenter_end) { if (((eip >= efp->sysenter->value) && (eip < efp->sysenter_end->value))) { for (i = 0; i < EFRAME_LABELS; i++) if (!efp->sysenter_labels[i]) efp->sysenter_labels[i] = eip; return efp->sysenter; } } return NULL; } /* * If it makes sense to display a different function/label name * in a stack entry, it can be done here. Unlike eframe_label(), * this routine won't cause the passed-in function name pointer * to be changed -- this is strictly for display purposes only. */ static char * funcname_display(char *funcname, ulong eip, struct bt_info *bt, char *buf) { struct syment *sp; ulong offset; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(eip, &offset); if (sp && offset) return value_to_symstr(eip, buf, bt->radix); } if (STREQ(funcname, "nmi_stack_correct") && (sp = symbol_search("nmi"))) return sp->name; return funcname; } /* * Cache 2k starting from the passed-in text address. This sits on top * of the instrbuf 256-byte cache, but we don't want to extend its size * because we can run off the end of a module segment -- if this routine * does so, it's benign. Tests of "foreach bt" result in more than an * 80% cache-hit rate. 
#define TEXT_BLOCK_SIZE (2048)

/*
 * fill_instr_cache()
 *
 * Copy 256 bytes of kernel text starting at pc into buf, served from a
 * static 2k read-ahead cache when the requested range lies wholly
 * inside the cached block.
 */
static void
fill_instr_cache(kaddr_t pc, char *buf)
{
	static kaddr_t last_block = 0;		/* base address of cached block */
	static char block[TEXT_BLOCK_SIZE];
	ulong offset;

	/* cache hit: all 256 bytes at pc lie inside the cached 2k block */
	if ((pc >= last_block) && ((pc+256) < (last_block+TEXT_BLOCK_SIZE))) {
		offset = pc - last_block;
	} else {
		if (readmem(pc, KVADDR, block, TEXT_BLOCK_SIZE,
		    "fill_instr_cache", RETURN_ON_ERROR|QUIET)) {
			last_block = pc;
			offset = 0;
		} else {
			/*
			 * Could not read a full 2k (e.g. near the end of a
			 * module segment): fall back to a 256-byte read and
			 * invalidate the cache.
			 */
			GET_BLOCK(pc, 256, block);
			last_block = 0;
			offset = 0;
		}
	}

	BCOPY(&block[offset], buf, 256);
}
#endif

/*
 * print_traces()
 *
 * Output a list of all valid code addresses contained in a stack
 * along with their function name and stack location.
 */
int
#ifdef REDHAT
print_traces(struct bt_info *bt, int level, int flags, FILE *ofp)
#else
print_traces(kaddr_t saddr, int level, int flags, FILE *ofp)
#endif
{
	int nfrms;
	char *fname, *cfname;
	uaddr_t *wordp, *stackp;
	trace_t *trace;
	kaddr_t addr, isp, caddr, sbase;
#ifdef REDHAT
	kaddr_t saddr = bt->stkptr;
#endif

	/* read the whole stack into a temporary buffer */
	stackp = (uaddr_t*)kl_alloc_block(STACK_SIZE, K_TEMP);
	sbase = saddr - STACK_SIZE;
	GET_BLOCK(sbase, STACK_SIZE, stackp);
	if (KL_ERROR) {
		kl_free_block(stackp);
		return(1);
	}

	if (!(trace = (trace_t *)alloc_trace_rec(K_TEMP))) {
#ifdef REDHAT
		error(INFO, "Could not alloc trace rec!\n");
#else
		fprintf(KL_ERRORFP, "Could not alloc trace rec!\n");
#endif
		kl_free_block(stackp);
		return(1);
	}

	setup_trace_rec(saddr, 0, 0, trace);
#ifdef REDHAT
	trace->bt = bt;
#endif

	/* scan every 32-bit word on the stack for text addresses */
	wordp = stackp;
	while(wordp < (stackp + (STACK_SIZE / 4))) {
		if ((addr = (kaddr_t)(*(uaddr_t*)wordp))) {
			/* check to see if this is a valid code address */
			if ((fname = kl_funcname(addr))) {
				/* Now use the instruction to back up and
				 * see if this RA was saved after a call.
				 * If it was, then try to determine what
				 * function was called. At the very least,
				 * only print out info for true return
				 * addresses (coming right after a call
				 * instruction -- even if we can't tell
				 * what function was called).
*/ isp = sbase + (((uaddr_t)wordp) - ((uaddr_t)stackp)); cfname = (char *)NULL; caddr = 0; if (get_jmp_instr(addr, isp, &caddr, fname, &cfname)) { wordp++; continue; } /* We have found a valid jump address. Now, * try and get a backtrace. */ nfrms = find_trace(addr, isp, 0, 0, trace, 0); if (nfrms) { if ((nfrms >= level) && (!trace->frame->prev->error || (flags & C_ALL))) { fprintf(ofp, "\nPC="); print_kaddr(addr, ofp, 0); fprintf(ofp, " SP="); print_kaddr(isp, ofp, 0); fprintf(ofp, " SADDR="); print_kaddr(saddr, ofp, 0); fprintf(ofp, "\n"); trace_banner(ofp); print_trace(trace, flags, ofp); trace_banner(ofp); } free_sframes(trace); } } wordp++; } else { wordp++; } } kl_free_block(stackp); return(0); } /* * do_list() * * Output a list of all valid code addresses contained in a stack * along with their function name and stack location. */ int #ifdef REDHAT do_text_list(kaddr_t saddr, int size, FILE *ofp) #else do_list(kaddr_t saddr, int size, FILE *ofp) #endif { char *fname, *cfname; uaddr_t *wordp, *stackp; kaddr_t addr, isp, caddr, sbase; stackp = (uaddr_t*)kl_alloc_block(size, K_TEMP); sbase = saddr - size; GET_BLOCK(sbase, size, stackp); if (KL_ERROR) { kl_free_block(stackp); return(1); } wordp = stackp; while(wordp < (stackp + (size / 4))) { if ((addr = (kaddr_t)(*(uaddr_t*)wordp))) { /* check to see if this is a valid code address */ if ((fname = kl_funcname(addr))) { /* Now use the instruction to back up and * see if this RA was saved after a call. * If it was, then try to determine what * function was called. At the very least, * only print out info for true return * addresses (coming right after a call * instruction -- even if we can't tell * what function was called). 
*/ isp = sbase + (((uaddr_t)wordp) - ((uaddr_t)stackp)); cfname = (char *)NULL; caddr = 0; if (get_jmp_instr(addr, isp, &caddr, fname, &cfname)) { wordp++; continue; } fprintf(ofp, "0x%x -- 0x%x (%s)", isp, addr, fname); if (cfname) { fprintf(ofp, " --> 0x%x (%s)\n", caddr, cfname); } else { fprintf(ofp, "\n"); } } wordp++; } else { wordp++; } } kl_free_block(stackp); return(0); } #ifndef REDHAT /* * add_frame() */ int add_frame(trace_t *trace, kaddr_t fp, kaddr_t ra) { sframe_t *cf, *sf; /* Check to make sure that sp is from the stack in the trace * record. * * XXX -- todo */ sf = (sframe_t *)alloc_sframe(trace, C_PERM); sf->fp = fp; sf->ra = ra; if ((cf = trace->frame)) { do { if (cf->fp && (sf->fp < cf->fp)) { if (cf->next == cf) { cf->prev = sf; sf->next = cf; cf->next = sf; sf->prev = cf; trace->frame = sf; } else { cf->prev->next = sf; sf->prev = cf->prev; cf->prev = sf; sf->next = cf; } return(0); } cf = cf->next; } while (cf != trace->frame); cf = 0; } if (!cf) { kl_enqueue((element_t **)&trace->frame, (element_t *)sf); } return(1); } /* * finish_trace() */ void finish_trace(trace_t *trace) { int level = 0, curstkidx = 0; uaddr_t *sbp; kaddr_t sbase, saddr; sframe_t *sf; sbp = trace->stack[curstkidx].ptr; sbase = trace->stack[curstkidx].addr; saddr = sbase + trace->stack[curstkidx].size; if ((sf = trace->frame)) { do { if (!sf->pc) { if (sf != trace->frame) { sf->sp = sf->prev->fp + 4; sf->pc = get_call_pc(sf->prev->ra); } if (!sf->pc) { sf = sf->next; continue; } } sf->level = level++; sf->frame_size = sf->fp - sf->sp + 4; sf->funcname = kl_funcname(sf->pc); sf->asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sf->sp))); sf = sf->next; } while (sf != trace->frame); if (level > 0) { sf = (sframe_t *)alloc_sframe(trace, C_PERM); sf->level = level; sf->sp = trace->frame->prev->fp + 4; sf->pc = get_call_pc(trace->frame->prev->ra); sf->funcname = kl_funcname(sf->pc); if (sf->funcname && strstr(sf->funcname, "kernel_thread")) { sf->ra = 0; sf->fp = saddr - 
4; sf->asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - 12)); } else { sf->fp = saddr - 20; kl_get_kaddr(sf->fp, &sf->ra); sf->asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sf->sp))); } sf->frame_size = sf->fp - sf->sp + 4; kl_enqueue((element_t **)&trace->frame, (element_t *)sf); } } } /* * dumptask_trace() */ int dumptask_trace( kaddr_t curtask, dump_header_asm_t *dha, int flags, FILE *ofp) { kaddr_t eip, esp, saddr; void *tsp; trace_t *trace; int i; for (i = 0; i < dha->dha_smp_num_cpus; i++) { if (curtask == (kaddr_t)dha->dha_smp_current_task[i]) { eip = dha->dha_smp_regs[i].eip; esp = dha->dha_smp_regs[i].esp; break; } } tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP); if (!tsp) { return(1); } if (kl_get_task_struct(curtask, 2, tsp)) { kl_free_block(tsp); return(1); } if (!(trace = alloc_trace_rec(K_TEMP))) { fprintf(KL_ERRORFP, "Could not alloc trace rec!\n"); } else { saddr = kl_kernelstack(curtask); setup_trace_rec(saddr, 0, 0, trace); find_trace(eip, esp, 0, 0, trace, 0); trace_banner(ofp); fprintf(ofp, "STACK TRACE FOR TASK: 0x%"FMTPTR"x (%s)\n\n", curtask, (char*)K_PTR(tsp, "task_struct", "comm")); print_trace(trace, flags, ofp); trace_banner(ofp); free_trace_rec(trace); } return(0); } #endif /* !REDHAT */ /* * lkcdutils-4.1/lcrash/arch/i386/lib/dis.c */ /* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. 
*/ #ifndef REDHAT #include #include #include #endif /* !REDHAT */ static int instr_buf_init = 1; static instr_buf_t instrbuf; static unsigned char *codeptr; /* Forward declarations for local functions */ static int seg_prefix(int); static int op_e(int, int, instr_rec_t *); static opcode_rec_t op_386[] = { /* 0x00 */ { "addb", Eb, Gb }, { "addS", Ev, Gv }, { "addb", Gb, Eb }, { "addS", Gv, Ev }, { "addb", AL, Ib }, { "addS", eAX, Iv }, { "pushS", es }, { "popS", es }, /* 0x08 */ { "orb", Eb, Gb }, { "orS", Ev, Gv }, { "orb", Gb, Eb }, { "orS", Gv, Ev }, { "orb", AL, Ib }, { "orS", eAX, Iv }, { "pushS", cs }, { "(bad)", BAD }, /* 0x10 */ { "adcb", Eb, Gb }, { "adcS", Ev, Gv }, { "adcb", Gb, Eb }, { "adcS", Gv, Ev }, { "adcb", AL, Ib }, { "adcS", eAX, Iv }, { "pushS", ss }, { "popS", ss }, /* 0x18 */ { "sbbb", Eb, Gb }, { "sbbS", Ev, Gv }, { "sbbb", Gb, Eb }, { "sbbS", Gv, Ev }, { "sbbb", AL, Ib }, { "sbbS", eAX, Iv }, { "pushS", ds }, { "popS", ds }, /* 0x20 */ { "andb", Eb, Gb }, { "andS", Ev, Gv }, { "andb", Gb, Eb }, { "andS", Gv, Ev }, { "andb", AL, Ib }, { "andS", eAX, Iv }, { "(bad)", BAD }, /* SEG ES prefix */ { "daa", NONE }, /* 0x28 */ { "subb", Eb, Gb }, { "subS", Ev, Gv }, { "subb", Gb, Eb }, { "subS", Gv, Ev }, { "subb", AL, Ib }, { "subS", eAX, Iv }, { "(bad)", BAD }, /* SEG CS prefix */ { "das", NONE }, /* 0x30 */ { "xorb", Eb, Gb }, { "xorS", Ev, Gv }, { "xorb", Gb, Eb }, { "xorS", Gv, Ev }, { "xorb", AL, Ib }, { "xorS", eAX, Iv }, { "(bad)", BAD }, /* SEG SS prefix */ { "aaa", NONE }, /* 0x38 */ { "cmpb", Eb, Gb }, { "cmpS", Ev, Gv }, { "cmpb", Gb, Eb }, { "cmpS", Gv, Ev }, { "cmpb", AL, Ib }, { "cmpS", eAX, Iv }, { "(bad)", BAD }, /* SEG DS previx */ { "aas", NONE }, /* 0x40 */ { "incS", eAX }, { "incS", eCX }, { "incS", eDX }, { "incS", eBX }, { "incS", eSP }, { "incS", eBP }, { "incS", eSI }, { "incS", eDI }, /* 0x48 */ { "decS", eAX }, { "decS", eCX }, { "decS", eDX }, { "decS", eBX }, { "decS", eSP }, { "decS", eBP }, { "decS", eSI }, { "decS", 
eDI }, /* 0x50 */ { "pushS", eAX }, { "pushS", eCX }, { "pushS", eDX }, { "pushS", eBX }, { "pushS", eSP }, { "pushS", eBP }, { "pushS", eSI }, { "pushS", eDI }, /* 0x58 */ { "popS", eAX }, { "popS", eCX }, { "popS", eDX }, { "popS", eBX }, { "popS", eSP }, { "popS", eBP }, { "popS", eSI }, { "popS", eDI }, /* 0x60 */ { "pusha", NONE }, { "popa", NONE }, { "boundS", Gv, Ma }, { "arpl", Ew, Gw }, { "(bad)", BAD }, /* seg fs */ { "(bad)", BAD }, /* seg gs */ { "(bad)", BAD }, /* op size prefix */ { "(bad)", BAD }, /* adr size prefix */ /* 0x68 */ { "pushS", Iv }, { "imulS", Gv, Ev, Iv }, { "pushS", sIb }, /* push of byte really pushes 2 or 4 bytes */ { "imulS", Gv, Ev, Ib }, { "insb", Yb, indirDX }, { "insS", Yv, indirDX }, { "outsb", indirDX, Xb }, { "outsS", indirDX, Xv }, /* 0x70 */ { "jo", Jb }, { "jno", Jb }, { "jb", Jb }, { "jae", Jb }, { "je", Jb }, { "jne", Jb }, { "jbe", Jb }, { "ja", Jb }, /* 0x78 */ { "js", Jb }, { "jns", Jb }, { "jp", Jb }, { "jnp", Jb }, { "jl", Jb }, { "jnl", Jb }, { "jle", Jb }, { "jg", Jb }, /* 0x80 */ { GRP1b }, { GRP1S }, { "(bad)", BAD }, { GRP1Ss }, { "testb", Eb, Gb }, { "testS", Ev, Gv }, { "xchgb", Eb, Gb }, { "xchgS", Ev, Gv }, /* 0x88 */ { "movb", Eb, Gb }, { "movS", Ev, Gv }, { "movb", Gb, Eb }, { "movS", Gv, Ev }, { "movw", Ew, Sw }, { "leaS", Gv, M }, { "movw", Sw, Ew }, { "popS", Ev }, /* 0x90 */ { "nop", NONE }, { "xchgS", eCX, eAX }, { "xchgS", eDX, eAX }, { "xchgS", eBX, eAX }, { "xchgS", eSP, eAX }, { "xchgS", eBP, eAX }, { "xchgS", eSI, eAX }, { "xchgS", eDI, eAX }, /* 0x98 */ { "cWtS", NONE }, { "cStd", NONE }, { "lcall", Ap }, { "(bad)", BAD }, /* fwait */ { "pushf", NONE }, { "popf", NONE }, { "sahf", NONE }, { "lahf", NONE }, /* 0xa0 */ { "movb", AL, Ob }, { "movS", eAX, Ov }, { "movb", Ob, AL }, { "movS", Ov, eAX }, { "movsb", Yb, Xb }, { "movsS", Yv, Xv }, { "cmpsb", Yb, Xb }, { "cmpsS", Yv, Xv }, /* 0xa8 */ { "testb", AL, Ib }, { "testS", eAX, Iv }, { "stosb", Yb, AL }, { "stosS", Yv, eAX }, { "lodsb", AL, Xb 
}, { "lodsS", eAX, Xv }, { "scasb", AL, Yb }, { "scasS", eAX, Yv }, /* 0xb0 */ { "movb", AL, Ib }, { "movb", CL, Ib }, { "movb", DL, Ib }, { "movb", BL, Ib }, { "movb", AH, Ib }, { "movb", CH, Ib }, { "movb", DH, Ib }, { "movb", BH, Ib }, /* 0xb8 */ { "movS", eAX, Iv }, { "movS", eCX, Iv }, { "movS", eDX, Iv }, { "movS", eBX, Iv }, { "movS", eSP, Iv }, { "movS", eBP, Iv }, { "movS", eSI, Iv }, { "movS", eDI, Iv }, /* 0xc0 */ { GRP2b }, { GRP2S }, { "ret", Iw }, { "ret", NONE }, { "lesS", Gv, Mp }, { "ldsS", Gv, Mp }, { "movb", Eb, Ib }, { "movS", Ev, Iv }, /* 0xc8 */ { "enter", Iw, Ib }, { "leave", NONE }, { "lret", Iw }, { "lret", NONE }, { "int3", NONE }, { "int", Ib }, { "into", NONE }, { "iret", NONE }, /* 0xd0 */ { GRP2b_one }, { GRP2S_one }, { GRP2b_cl }, { GRP2S_cl }, { "aam", Ib }, { "aad", Ib }, { "(bad)", BAD }, { "xlat", NONE }, /* 0xd8 */ { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, /* 0xe0 */ { "loopne", Jb }, { "loope", Jb }, { "loop", Jb }, { "jCcxz", Jb }, { "inb", AL, Ib }, { "inS", eAX, Ib }, { "outb", Ib, AL }, { "outS", Ib, eAX }, /* 0xe8 */ { "call", Av }, { "jmp", Jv }, { "ljmp", Ap }, { "jmp", Jb }, { "inb", AL, indirDX }, { "inS", eAX, indirDX }, { "outb", indirDX, AL }, { "outS", indirDX, eAX }, /* 0xf0 */ { "(bad)", BAD }, /* lock prefix */ { "(bad)", BAD }, { "(bad)", BAD }, /* repne */ { "(bad)", BAD }, /* repz */ { "hlt", NONE }, { "cmc", NONE }, { GRP3b }, { GRP3S }, /* 0xf8 */ { "clc", NONE }, { "stc", NONE }, { "cli", NONE }, { "sti", NONE }, { "cld", NONE }, { "std", NONE }, { GRP4 }, { GRP5 }, }; static opcode_rec_t op_386_twobyte[] = { /* 0x00 */ { GRP6 }, { GRP7 }, { "larS", Gv, Ew }, { "lslS", Gv, Ew }, { "(bad)", BAD }, { "(bad)", BAD }, { "clts", NONE }, { "(bad)", BAD }, /* 0x08 */ { "invd", NONE }, { "wbinvd", NONE }, { "(bad)", BAD }, { "ud2a", NONE }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x10 */ 
{ "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x18 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x20 */ /* these are all backward in appendix A of the intel book */ { "movl", Rd, Cd }, { "movl", Rd, Dd }, { "movl", Cd, Rd }, { "movl", Dd, Rd }, { "movl", Rd, Td }, { "(bad)", BAD }, { "movl", Td, Rd }, { "(bad)", BAD }, /* 0x28 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x30 */ { "wrmsr", NONE }, { "rdtsc", NONE }, { "rdmsr", NONE }, { "rdpmc", NONE }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x38 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x40 */ { "cmovo", Gv,Ev }, { "cmovno", Gv,Ev }, { "cmovb", Gv,Ev }, { "cmovae", Gv,Ev }, { "cmove", Gv,Ev }, { "cmovne", Gv,Ev }, { "cmovbe", Gv,Ev }, { "cmova", Gv,Ev }, /* 0x48 */ { "cmovs", Gv,Ev }, { "cmovns", Gv,Ev }, { "cmovp", Gv,Ev }, { "cmovnp", Gv,Ev }, { "cmovl", Gv,Ev }, { "cmovge", Gv,Ev }, { "cmovle", Gv,Ev }, { "cmovg", Gv,Ev }, /* 0x50 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x58 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x60 */ { "punpcklbw", MX, EM }, { "punpcklwd", MX, EM }, { "punpckldq", MX, EM }, { "packsswb", MX, EM }, { "pcmpgtb", MX, EM }, { "pcmpgtw", MX, EM }, { "pcmpgtd", MX, EM }, { "packuswb", MX, EM }, /* 0x68 */ { "punpckhbw", MX, EM }, { "punpckhwd", MX, EM }, { "punpckhdq", MX, EM }, { "packssdw", MX, EM }, { "(bad)", BAD }, { "(bad)", 
BAD }, { "movd", MX, Ev }, { "movq", MX, EM }, /* 0x70 */ { "(bad)", BAD }, { GRP10 }, { GRP11 }, { GRP12 }, { "pcmpeqb", MX, EM }, { "pcmpeqw", MX, EM }, { "pcmpeqd", MX, EM }, { "emms" , NONE }, /* 0x78 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "movd", Ev, MX }, { "movq", EM, MX }, /* 0x80 */ { "jo", Jv }, { "jno", Jv }, { "jb", Jv }, { "jae", Jv }, { "je", Jv }, { "jne", Jv }, { "jbe", Jv }, { "ja", Jv }, /* 0x88 */ { "js", Jv }, { "jns", Jv }, { "jp", Jv }, { "jnp", Jv }, { "jl", Jv }, { "jge", Jv }, { "jle", Jv }, { "jg", Jv }, /* 0x90 */ { "seto", Eb }, { "setno", Eb }, { "setb", Eb }, { "setae", Eb }, { "sete", Eb }, { "setne", Eb }, { "setbe", Eb }, { "seta", Eb }, /* 0x98 */ { "sets", Eb }, { "setns", Eb }, { "setp", Eb }, { "setnp", Eb }, { "setl", Eb }, { "setge", Eb }, { "setle", Eb }, { "setg", Eb }, /* 0xa0 */ { "pushS", fs }, { "popS", fs }, { "cpuid", NONE }, { "btS", Ev, Gv }, { "shldS", Ev, Gv, Ib }, { "shldS", Ev, Gv, CL }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xa8 */ { "pushS", gs }, { "popS", gs }, { "rsm", NONE }, { "btsS", Ev, Gv }, { "shrdS", Ev, Gv, Ib }, { "shrdS", Ev, Gv, CL }, { "(bad)", BAD }, { "imulS", Gv, Ev }, /* 0xb0 */ { "cmpxchgb", Eb, Gb }, { "cmpxchgS", Ev, Gv }, { "lssS", Gv, Mp }, /* 386 lists only Mp */ { "btrS", Ev, Gv }, { "lfsS", Gv, Mp }, /* 386 lists only Mp */ { "lgsS", Gv, Mp }, /* 386 lists only Mp */ { "movzbS", Gv, Eb }, { "movzwS", Gv, Ew }, /* 0xb8 */ { "ud2b", NONE }, { "(bad)", BAD }, { GRP8 }, { "btcS", Ev, Gv }, { "bsfS", Gv, Ev }, { "bsrS", Gv, Ev }, { "movsbS", Gv, Eb }, { "movswS", Gv, Ew }, /* 0xc0 */ { "xaddb", Eb, Gb }, { "xaddS", Ev, Gv }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { GRP9 }, /* 0xc8 */ { "bswap", eAX }, { "bswap", eCX }, { "bswap", eDX }, { "bswap", eBX }, { "bswap", eSP }, { "bswap", eBP }, { "bswap", eSI }, { "bswap", eDI }, /* 0xd0 */ { "(bad)", BAD }, { "psrlw", 
MX, EM }, { "psrld", MX, EM }, { "psrlq", MX, EM }, { "(bad)", BAD }, { "pmullw", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xd8 */ { "psubusb", MX, EM }, { "psubusw", MX, EM }, { "(bad)", BAD }, { "pand", MX, EM }, { "paddusb", MX, EM }, { "paddusw", MX, EM }, { "(bad)", BAD }, { "pandn", MX, EM }, /* 0xe0 */ { "(bad)", BAD }, { "psraw", MX, EM }, { "psrad", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, { "pmulhw", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xe8 */ { "psubsb", MX, EM }, { "psubsw", MX, EM }, { "(bad)", BAD }, { "por", MX, EM }, { "paddsb", MX, EM }, { "paddsw", MX, EM }, { "(bad)", BAD }, { "pxor", MX, EM }, /* 0xf0 */ { "(bad)", BAD }, { "psllw", MX, EM }, { "pslld", MX, EM }, { "psllq", MX, EM }, { "(bad)", BAD }, { "pmaddwd", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xf8 */ { "psubb", MX, EM }, { "psubw", MX, EM }, { "psubd", MX, EM }, { "(bad)", BAD }, { "paddb", MX, EM }, { "paddw", MX, EM }, { "paddd", MX, EM }, { "(bad)", BAD }, }; static opcode_rec_t grps[][8] = { /* GRP1b */ { { "addb", Eb, Ib }, { "orb", Eb, Ib }, { "adcb", Eb, Ib }, { "sbbb", Eb, Ib }, { "andb", Eb, Ib }, { "subb", Eb, Ib }, { "xorb", Eb, Ib }, { "cmpb", Eb, Ib } }, /* GRP1S */ { { "addS", Ev, Iv }, { "orS", Ev, Iv }, { "adcS", Ev, Iv }, { "sbbS", Ev, Iv }, { "andS", Ev, Iv }, { "subS", Ev, Iv }, { "xorS", Ev, Iv }, { "cmpS", Ev, Iv } }, /* GRP1Ss */ { { "addS", Ev, sIb }, { "orS", Ev, sIb }, { "adcS", Ev, sIb }, { "sbbS", Ev, sIb }, { "andS", Ev, sIb }, { "subS", Ev, sIb }, { "xorS", Ev, sIb }, { "cmpS", Ev, sIb } }, /* GRP2b */ { { "rolb", Eb, Ib }, { "rorb", Eb, Ib }, { "rclb", Eb, Ib }, { "rcrb", Eb, Ib }, { "shlb", Eb, Ib }, { "shrb", Eb, Ib }, { "(bad)", BAD }, { "sarb", Eb, Ib }, }, /* GRP2S */ { { "rolS", Ev, Ib }, { "rorS", Ev, Ib }, { "rclS", Ev, Ib }, { "rcrS", Ev, Ib }, { "shlS", Ev, Ib }, { "shrS", Ev, Ib }, { "(bad)", BAD }, { "sarS", Ev, Ib }, }, /* GRP2b_one */ { { "rolb", Eb }, { "rorb", Eb }, { "rclb", Eb }, { "rcrb", Eb }, { 
"shlb", Eb }, { "shrb", Eb }, { "(bad)", BAD }, { "sarb", Eb }, }, /* GRP2S_one */ { { "rolS", Ev }, { "rorS", Ev }, { "rclS", Ev }, { "rcrS", Ev }, { "shlS", Ev }, { "shrS", Ev }, { "(bad)", BAD }, { "sarS", Ev }, }, /* GRP2b_cl */ { { "rolb", Eb, CL }, { "rorb", Eb, CL }, { "rclb", Eb, CL }, { "rcrb", Eb, CL }, { "shlb", Eb, CL }, { "shrb", Eb, CL }, { "(bad)", BAD }, { "sarb", Eb, CL }, }, /* GRP2S_cl */ { { "rolS", Ev, CL }, { "rorS", Ev, CL }, { "rclS", Ev, CL }, { "rcrS", Ev, CL }, { "shlS", Ev, CL }, { "shrS", Ev, CL }, { "(bad)", BAD }, { "sarS", Ev, CL } }, /* GRP3b */ { { "testb", Eb, Ib }, { "(bad)", Eb }, { "notb", Eb }, { "negb", Eb }, { "mulb", AL, Eb }, { "imulb", AL, Eb }, { "divb", AL, Eb }, { "idivb", AL, Eb } }, /* GRP3S */ { { "testS", Ev, Iv }, { "(bad)", BAD }, { "notS", Ev }, { "negS", Ev }, { "mulS", eAX, Ev }, { "imulS", eAX, Ev }, { "divS", eAX, Ev }, { "idivS", eAX, Ev }, }, /* GRP4 */ { { "incb", Eb }, { "decb", Eb }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, }, /* GRP5 */ { { "incS", Ev }, { "decS", Ev }, { "call", indirEv }, { "lcall", indirEv }, { "jmp", indirEv }, { "ljmp", indirEv }, { "pushS", Ev }, { "(bad)", BAD }, }, /* GRP6 */ { { "sldt", Ew }, { "str", Ew }, { "lldt", Ew }, { "ltr", Ew }, { "verr", Ew }, { "verw", Ew }, { "(bad)", BAD }, { "(bad)", BAD } }, /* GRP7 */ { { "sgdt", Ew }, { "sidt", Ew }, { "lgdt", Ew }, { "lidt", Ew }, { "smsw", Ew }, { "(bad)", BAD }, { "lmsw", Ew }, { "invlpg", Ew }, }, /* GRP8 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "btS", Ev, Ib }, { "btsS", Ev, Ib }, { "btrS", Ev, Ib }, { "btcS", Ev, Ib }, }, /* GRP9 */ { { "(bad)", BAD }, { "cmpxchg8b", Ev }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, }, /* GRP10 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrlw", MS, Ib }, { "(bad)", BAD }, { "psraw", MS, Ib }, { "(bad)", BAD }, { "psllw", 
MS, Ib }, { "(bad)", BAD }, }, /* GRP11 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrld", MS, Ib }, { "(bad)", BAD }, { "psrad", MS, Ib }, { "(bad)", BAD }, { "pslld", MS, Ib }, { "(bad)", BAD }, }, /* GRP12 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrlq", MS, Ib }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "psllq", MS, Ib }, { "(bad)", BAD }, } }; static opcode_rec_t float_grps[][8] = { /* d8 */ { { "fadd", ST, STi }, { "fmul", ST, STi }, { "fcom", STi }, { "fcomp", STi }, { "fsub", ST, STi }, { "fsubr", ST, STi }, { "fdiv", ST, STi }, { "fdivr", ST, STi }, }, /* d9 */ { { "fld", STi }, { "fxch", STi }, { FGRPd9_2 }, { "(bad)" }, { FGRPd9_4 }, { FGRPd9_5 }, { FGRPd9_6 }, { FGRPd9_7 }, }, /* da */ { { "fcmovb", ST, STi }, { "fcmove", ST, STi }, { "fcmovbe",ST, STi }, { "fcmovu", ST, STi }, { "(bad)" }, { FGRPda_5 }, { "(bad)" }, { "(bad)" }, }, /* db */ { { "fcmovnb",ST, STi }, { "fcmovne",ST, STi }, { "fcmovnbe",ST, STi }, { "fcmovnu",ST, STi }, { FGRPdb_4 }, { "fucomi", ST, STi }, { "fcomi", ST, STi }, { "(bad)" }, }, /* dc */ { { "fadd", STi, ST }, { "fmul", STi, ST }, { "(bad)" }, { "(bad)" }, { "fsub", STi, ST }, { "fsubr", STi, ST }, { "fdiv", STi, ST }, { "fdivr", STi, ST }, }, /* dd */ { { "ffree", STi }, { "(bad)" }, { "fst", STi }, { "fstp", STi }, { "fucom", STi }, { "fucomp", STi }, { "(bad)" }, { "(bad)" }, }, /* de */ { { "faddp", STi, ST }, { "fmulp", STi, ST }, { "(bad)" }, { FGRPde_3 }, { "fsubp", STi, ST }, { "fsubrp", STi, ST }, { "fdivp", STi, ST }, { "fdivrp", STi, ST }, }, /* df */ { { "(bad)" }, { "(bad)" }, { "(bad)" }, { "(bad)" }, { FGRPdf_4 }, { "fucomip",ST, STi }, { "fcomip", ST, STi }, { "(bad)" }, }, }; static char *fgrps[][8] = { /* d9_2 0 */ { "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* d9_4 1 */ { "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)", }, /* d9_5 2 */ { "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)", }, /* d9_6 3 */ { 
"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp", }, /* d9_7 4 */ { "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos", }, /* da_5 5 */ { "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* db_4 6 */ { "feni(287 only)","fdisi(287 only)","fNclex","fNinit", "fNsetpm(287 only)","(bad)","(bad)","(bad)", }, /* de_3 7 */ { "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* df_4 8 */ { "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, }; static char *float_mem[] = { /* 0xd8 */ "fadds","fmuls","fcoms","fcomps","fsubs","fsubrs","fdivs","fdivrs", /* 0xd9 */ "flds","(bad)","fsts","fstps","fldenv","fldcw","fNstenv","fNstcw", /* 0xda */ "fiaddl","fimull","ficoml","ficompl","fisubl","fisubrl","fidivl", "fidivrl", /* 0xdb */ "fildl","(bad)","fistl","fistpl","(bad)","fldt","(bad)","fstpt", /* 0xdc */ "faddl","fmull","fcoml","fcompl","fsubl","fsubrl","fdivl","fdivrl", /* 0xdd */ "fldl","(bad)","fstl","fstpl","frstor","(bad)","fNsave","fNstsw", /* 0xde */ "fiadd","fimul","ficom","ficomp","fisub","fisubr","fidiv","fidivr", /* 0xdf */ "fild","(bad)","fist","fistp","fbld","fildll","fbstp","fistpll", }; static const unsigned char onebyte_has_modrm[256] = { /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 }; static const unsigned char twobyte_has_modrm[256] = { /* 00 */ 
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ /* 20 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* 2f */ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1, /* 6f */ /* 70 */ 0,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1, /* 7f */ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */ /* d0 */ 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1, /* df */ /* e0 */ 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1, /* ef */ /* f0 */ 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0 /* ff */ }; #ifdef NOT_USED static int reg_num[] = { 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, }; #endif #ifndef REDHAT static char *reg_name[] = { "%eax","%ecx","%edx","%ebx","%esp","%ebp","%esi","%edi", "%ax","%cx","%dx","%bx","%sp","%bp","%si","%di", "%al","%cl","%dl","%bl","%ah","%ch","%dh","%bh", "%es","%cs","%ss","%ds","%fs","%gs", "bx+si","bx+di","bp+si","bp+di", }; #endif /* !REDHAT */ static int reg_32[] = { R_eAX, R_eCX, R_eDX, R_eBX, R_eSP, R_eBP, R_eSI, R_eDI, }; static int reg_16[] = { R_AX, R_CX, R_DX, R_BX, R_SP, R_BP, R_SI, R_DI, }; static int reg_8[] = { R_AL, R_CL, R_DL, R_BL, R_AH, R_CH, R_DH, R_BH, }; static int reg_seg[] = { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS, R_BAD, R_BAD, }; static int reg_index[] = { R_BX_SI, R_BX_DI, R_BP_SI, R_BP_DI, R_SI, R_DI, R_BP, R_BX, }; #ifndef REDHAT static char *optype_name[] = { "NONE","A","C","D","E","M_indirE","F","G","I","sI","J","M", "O","P","Q","R","S","T","V","W","X","Y","MMX","EM","MS","GRP", "REG", }; static char *opmods[] = { "NONE","a","b","c","d","dg","p","pi", "ps","q","s","ss","si","v","w", }; static char *reg_opname[] = { "eAX","eCX","eDX","eBX","eSP","eBP","eSI","eDI", 
"AX","CX","DX","BX","SP","BP","SI","DI", "AL","CL","DL","BL","AH","CH","DH","BH", "ES","CS","SS","DS","FS","GS", }; static void printaddr(kaddr_t addr, int flag, FILE *ofp) { int offset = 0; syment_t *sp; if ((sp = kl_lkup_symaddr(addr))) { offset = addr - sp->s_addr; } /* Print out address */ fprintf(ofp, "0x%x", addr); /* Print out symbol name */ if (sp) { if (offset) { fprintf(ofp, " <%s+%d>", sp->s_name, offset); } else { fprintf(ofp, " <%s>", sp->s_name); } } /* Line things up properly for current function */ if (flag) { if (offset == 0) { fprintf(ofp, ": "); } else if (offset < 10) { fprintf(ofp, ": "); } else if (offset < 100) { fprintf(ofp, ": "); } else if (offset < 1000) { fprintf(ofp, ": "); } else if (offset < 10000) { fprintf(ofp, ": "); } else { fprintf(ofp, ": "); } } } static void print_optype(int m, int t, FILE *ofp) { if (m >= M_BAD) { fprintf(ofp, "BAD"); } else if (m == M_REG) { if (t >= R_BAD) { fprintf(ofp, "REG_BAD"); } else { fprintf(ofp, "%s", reg_opname[t]); } } else { if (t == T_NONE) { fprintf(ofp, "%s", optype_name[m]); } else if (t >= T_BAD) { fprintf(ofp, "%s(bad)", optype_name[m]); } else { fprintf(ofp, "%s%s", optype_name[m], opmods[t]); } } } #endif /* !REDHAT */ static void get_modrm_info(unsigned char modr, int *mod_rm, int *reg_op) { *mod_rm = ((modr >> 6) << 3) | (modr & 7); *reg_op = (modr >> 3) & 7; } static int is_prefix(unsigned char c) { int prefix = 0; switch(c) { case 0xf3: prefix = PREFIX_REPZ; break; case 0xf2: prefix = PREFIX_REPNZ; break; case 0xf0: prefix = PREFIX_LOCK; break; case 0x2e: prefix = PREFIX_CS; break; case 0x36: prefix = PREFIX_SS; break; case 0x3e: prefix = PREFIX_DS; break; case 0x26: prefix = PREFIX_ES; break; case 0x64: prefix = PREFIX_FS; break; case 0x65: prefix = PREFIX_GS; break; case 0x66: prefix = PREFIX_DATA; break; case 0x67: prefix = PREFIX_ADR; break; case 0x9b: prefix = PREFIX_FWAIT; break; } return(prefix); } static int get_modrm_reg16(int mod_rm, int opdata, instr_rec_t *irp) { int reg, 
mod; mod = irp->modrm >> 6; switch (mod_rm) { case 0x6: break; default: reg = mod_rm - (mod * 8); return(reg_index[reg]); } return(R_BAD); } static int get_modrm_reg32(int mod_rm, int opdata, instr_rec_t *irp) { int reg; switch (mod_rm) { case 0x0: case 0x1: case 0x2: case 0x3: case 0x6: case 0x7: return(mod_rm); case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: reg = mod_rm - 0x18; switch (opdata) { case T_b: return(reg_8[reg]); case T_w: return(reg_16[reg]); case T_v: if (irp->dflag) { return(reg_32[reg]); } else { return(reg_16[reg]); } } } return(R_BAD); } #ifndef REDHAT static void print_instrname(char *name, instr_rec_t *irp, FILE *ofp) { char *cp, *np, name_str[100]; strncpy (name_str, name, 100); np = name; cp = name_str; while (*np) { if (*np == 'C') { /* For jcxz/jecxz */ if (irp->aflag) { *cp++ = 'e'; } } else if (*np == 'N') { if ((irp->prefixes & PREFIX_FWAIT) == 0) { *cp++ = 'n'; } } else if (*np == 'S') { /* operand size flag */ if (irp->dflag) { *cp++ = 'l'; } else { *cp++ = 'w'; } } else if (*np == 'W') { /* operand size flag for cwtl, cbtw */ if (irp->dflag) { *cp++ = 'w'; } else { *cp++ = 'b'; } } else { *cp++ = *np; } np++; } while(*cp) { *cp++ = ' '; } *cp = 0; fprintf(ofp, "%s", name_str); } #endif /* !REDHAT */ static void op_a(int opnum, int opdata, instr_rec_t *irp) { int offset; kaddr_t pc; pc = instrbuf.addr + (instrbuf.ptr - instrbuf.buf); switch(opdata) { case T_p: if (irp->aflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } irp->operand[opnum].op_seg = *(uint16_t*)codeptr; irp->operand[opnum].op_type = O_LPTR; codeptr += 2; break; case T_v: if (irp->aflag) { offset = *(int*)codeptr; irp->operand[opnum].op_addr = pc + offset + 5; codeptr += 4; } else { offset = *(short*)codeptr; irp->operand[opnum].op_addr = pc + offset + 3; codeptr += 2; } irp->operand[opnum].op_type = O_ADDR; break; default: break; } 
} static void op_c(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_type = (O_REG|O_CR); irp->operand[opnum].op_reg = reg; } static void op_d(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_type = (O_REG|O_DB); irp->operand[opnum].op_reg = reg; } static void op_indir_e(int opnum, int opdata, instr_rec_t *irp) { op_e(opnum, opdata, irp); irp->operand[opnum].op_type |= O_INDIR; } static void get_modrm_data16(int opnum, int opdata, instr_rec_t *irp) { int mod ATTRIBUTE_UNUSED; int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; switch(mod_rm) { case 0: case 1: case 2: case 3: case 4: case 5: case 7: reg = get_modrm_reg16(mod_rm, opdata, irp); irp->operand[opnum].op_reg = reg; irp->operand[opnum].op_type = (O_REG|O_BASE); break; case 6: /* 16-bit displacement */ irp->operand[opnum].op_type = O_DISP; irp->operand[opnum].op_disp = *(uint16_t*)codeptr; codeptr += 2; break; case 8: /* disp8[BX+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 9: /* disp8[BX+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 10: /* disp8[BP+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 11: /* disp8[BP+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 12: /* disp8[SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 13: /* disp8[DI] */ irp->operand[opnum].op_type = 
(O_REG|O_DISP); irp->operand[opnum].op_reg = R_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 14: /* disp8[BP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 15: /* disp8[BX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 16: /* disp16[BX+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 17: /* disp16[BX+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 18: /* disp16[BP+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 19: /* disp16[BP+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 20: /* disp16[SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 21: /* disp16[DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 22: /* disp16[BP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 23: /* disp16[BX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; } } static void get_modrm_data32(int opnum, int opdata, instr_rec_t *irp) { int mod 
ATTRIBUTE_UNUSED; int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; switch(mod_rm) { case 0: case 1: case 2: case 3: case 6: case 7: reg = get_modrm_reg32(mod_rm, opdata, irp); irp->operand[opnum].op_reg = reg; irp->operand[opnum].op_type = (O_REG|O_BASE); break; case 5: /* 32-bit displacement */ irp->operand[opnum].op_type = O_DISP; irp->operand[opnum].op_disp = *(kaddr_t*)codeptr; codeptr += 4; break; case 8: /* disp8[EAX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eAX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 9: /* disp8[ECX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eCX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 10: /* disp8[EDX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 11: /* disp8[EBX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 13: /* disp8[EBP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBP; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 14: /* disp8[ESI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eSI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 15: /* disp8[EDI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 16: /* disp32[EAX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eAX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 17: /* disp32[ECX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eCX; 
irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 18: /* disp32[EDX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 19: /* disp32[EBX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 4: /* [..][..] (SIB) */ case 12: /* disp8[..][..] (SIB) */ case 20: { /* disp32[..][..] (SIB) */ int rm ATTRIBUTE_UNUSED; int s, i, b, mod, havebase; s = (irp->sib >> 6) & 3; i = (irp->sib >> 3) & 7; b = irp->sib & 7; mod = irp->modrm >> 6; rm = irp->modrm & 7; havebase = 1; switch (mod) { case 0: if (b == 5) { havebase = 0; irp->operand[opnum].op_disp = *(int*)codeptr; irp->operand[opnum].op_type = O_DISP; codeptr += 4; } break; case 1: irp->operand[opnum].op_disp = *(signed char*) codeptr; codeptr++; irp->operand[opnum].op_type = O_DISP; break; case 2: irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; irp->operand[opnum].op_type = O_DISP; break; } if (havebase) { irp->operand[opnum].op_base = b; irp->operand[opnum].op_type |= O_BASE; } if (i != 4) { irp->operand[opnum].op_index = i; irp->operand[opnum].op_type |= O_INDEX; } if (s) { irp->operand[opnum].op_scale = s; irp->operand[opnum].op_type |= O_SCALE; } break; } case 21: /* disp32[EBP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBP; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 22: /* disp32[ESI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eSI; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 23: /* disp32[EDI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDI; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; } } static int op_e(int opnum, int opdata, instr_rec_t *irp) { int reg, mod, mod_rm, reg_op; 
get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; if (mod == 3) { /* ((mod_rm >= 24) && (mod_rm <=31)) */ if (opdata == T_NONE) { return(1); } if (irp->aflag) { reg = get_modrm_reg32(mod_rm, opdata, irp); } else { reg = get_modrm_reg16(mod_rm, opdata, irp); } irp->operand[opnum].op_type = O_REG; irp->operand[opnum].op_reg = reg; if ((reg = R_BAD)) { return(1); } else { return(0); } } if (irp->aflag) { get_modrm_data32(opnum, opdata, irp); } else { get_modrm_data16(opnum, opdata, irp); } if (seg_prefix(irp->prefixes)) { irp->operand[opnum].op_type |= O_SEG; irp->operand[opnum].op_seg = seg_prefix(irp->prefixes); } return(0); } static int op_g(int opnum, int opdata, instr_rec_t *irp) { int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); irp->operand[opnum].op_type = O_REG; if ((reg_op < 0) || (reg_op >= 8)){ irp->operand[opnum].op_reg = R_BAD; return(1); } switch(opdata) { case T_b: reg = reg_8[reg_op]; break; case T_w: reg = reg_16[reg_op]; break; case T_d: reg = reg_32[reg_op]; break; case T_v: if (irp->dflag) { reg = reg_32[reg_op]; } else { reg = reg_16[reg_op]; } break; default: irp->operand[opnum].op_reg = R_BAD; return(1); } irp->operand[opnum].op_reg = reg; return(0); } static void op_i(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_type = O_IMMEDIATE; switch (opdata) { case T_b: irp->operand[opnum].op_addr = *(unsigned char*)codeptr; codeptr++; break; case T_w: irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; break; case T_v: if (irp->dflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } break; } } static void op_s(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_reg = reg_seg[reg]; irp->operand[opnum].op_type = O_REG; } static void op_si(int opnum, int opdata, instr_rec_t *irp) { int val; irp->operand[opnum].op_type = O_IMMEDIATE; switch 
(opdata) { case T_b: val = *(signed char*)codeptr++; irp->operand[opnum].op_addr = val; break; case T_v: if (irp->dflag) { irp->operand[opnum].op_addr = *(int*)codeptr; codeptr += 4; } else { val = *(short*)codeptr; irp->operand[opnum].op_addr = val; codeptr += 2; } break; case T_w: val = *(short*)codeptr; irp->operand[opnum].op_addr = val; codeptr += 2; break; } } static void op_j(int opnum, int opdata, instr_rec_t *irp) { kaddr_t pc; pc = instrbuf.addr + (instrbuf.ptr - instrbuf.buf); pc += (codeptr - instrbuf.ptr); switch (opdata) { case T_b: pc++; pc += *(signed char *)codeptr++; break; case T_v: if (irp->dflag) { /* 32-bit */ pc += 4; pc += *(int*)codeptr; codeptr += 4; } else { /* 16-bit */ pc += 2; pc += *(short*)codeptr; codeptr += 2; } break; } irp->operand[opnum].op_type = O_ADDR; irp->operand[opnum].op_addr = pc; } static void op_m(int opnum, int opdata, instr_rec_t *irp) { op_e(opnum, 0, irp); } static void op_o(int opnum, int opdata, instr_rec_t *irp) { if (irp->aflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } irp->operand[opnum].op_type = O_OFF; } static void op_r(int opnum, int opdata, instr_rec_t *irp) { int rm; rm = irp->modrm & 7; switch (opdata) { case T_d: irp->operand[opnum].op_reg = reg_32[rm]; break; case T_w: irp->operand[opnum].op_reg = reg_16[rm]; break; } irp->operand[opnum].op_type = O_REG; } static void op_x(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_seg = R_DS; if (irp->aflag) { irp->operand[opnum].op_reg = R_eSI; } else { irp->operand[opnum].op_reg = R_SI; } irp->operand[opnum].op_type = O_SEG; } static void op_y(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_seg = R_ES; if (irp->aflag) { irp->operand[opnum].op_reg = R_eDI; } else { irp->operand[opnum].op_reg = R_DI; } irp->operand[opnum].op_type = O_SEG; } static void get_operand_info(int opnum, instr_rec_t *irp) { int opcode, opdata; opcode = 
opdata = 0; switch(opnum) { case 0: opcode = irp->opcodep->Op1; opdata = irp->opcodep->opdata1; break; case 1: opcode = irp->opcodep->Op2; opdata = irp->opcodep->opdata2; break; case 2: opcode = irp->opcodep->Op3; opdata = irp->opcodep->opdata3; break; } switch (opcode) { case M_A: op_a(opnum, opdata, irp); break; case M_C: op_c(opnum, opdata, irp); break; case M_D: op_d(opnum, opdata, irp); break; case M_E: op_e(opnum, opdata, irp); break; case M_indirE: op_indir_e(opnum, opdata, irp); break; case M_G: op_g(opnum, opdata, irp); break; case M_I: op_i(opnum, opdata, irp); break; case M_sI: op_si(opnum, opdata, irp); break; case M_J: op_j(opnum, opdata, irp); break; case M_M: op_m(opnum, opdata, irp); break; case M_O: op_o(opnum, opdata, irp); break; case M_R: op_r(opnum, opdata, irp); break; case M_S: op_s(opnum, opdata, irp); break; case M_X: op_x(opnum, opdata, irp); break; case M_Y: op_y(opnum, opdata, irp); break; case M_REG: case M_indirREG: irp->operand[opnum].op_type = O_REG; if (opdata >= R_AX) { irp->operand[opnum].op_reg = opdata; } else { if (irp->dflag) { irp->operand[opnum].op_reg = reg_32[opdata]; } else { irp->operand[opnum].op_reg = reg_16[opdata]; } } if (opcode == M_indirREG) { /* The O_BASE gets the right results */ irp->operand[opnum].op_type |= O_BASE; } break; } } /* Temporary opcode_rec_s struct that we keep around for the times * when we have to construct a special case instruction (e.g. some * floating point instructions). */ static opcode_rec_t tempop; static char fwait_name[] = "fwait"; int get_instr_info(kaddr_t pc, instr_rec_t *irp) { int opcode, size = 0, p, prefixes = 0; unsigned char modrm = 0; opcode_rec_t *op; if (instr_buf_init) { bzero(&instrbuf, sizeof(instrbuf)); instr_buf_init = 0; } /* Check to see instrbuf is valid and if there are enough * bytes in our instruction cache to cover the worst case * scenario for this pc. 
*/ if (!instrbuf.addr || (pc < instrbuf.addr) || (pc > (instrbuf.addr + instrbuf.size - 15))) { instrbuf.addr = pc; instrbuf.size = 256; #ifdef REDHAT fill_instr_cache(pc, (char *)instrbuf.buf); #else GET_BLOCK(pc, 256, instrbuf.buf); #endif if (KL_ERROR) { return(0); } } /* Make sure that the instruction pointer points to the * right byte in the buffer. */ instrbuf.ptr = instrbuf.buf + (pc - instrbuf.addr); codeptr = instrbuf.ptr; irp->addr = pc; /* Check for prefixes */ while((p = is_prefix(*codeptr))) { prefixes |= p; codeptr++; if ((prefixes & PREFIX_FWAIT) && ((*codeptr < 0xd8) || (*codeptr > 0xdf))) { /* If there is an fwait prefix that is not * followed by a float instruction, we need to * create a special instruction record so that * the "fwait" gets printed out. */ bzero(&tempop, sizeof(tempop)); tempop.name = fwait_name; irp->opcodep = &tempop; size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } } if (prefixes & PREFIX_DATA) { irp->dflag ^= 1; } if (prefixes & PREFIX_ADR) { irp->aflag ^= 1; } /* Check for one or two byte opcode, capture the opcode and * check for a ModR/M byte. */ if (*codeptr == 0x0f) { opcode = *((unsigned short*)codeptr); codeptr++; op = &op_386_twobyte[*codeptr]; if(twobyte_has_modrm[*codeptr]) { codeptr++; modrm = *codeptr++; } else { codeptr++; } if (STREQ(op->name, "ud2a")) codeptr += kt->BUG_bytes; } else { opcode = *codeptr; op = &op_386[*codeptr]; if(onebyte_has_modrm[*codeptr]) { codeptr++; modrm = *codeptr++; } else { codeptr++; } } /* See if the get_op bits from the modrm are needed to determine * the actual instruction. 
*/ if (op->Op1 == M_GRP) { op = &grps[op->opdata1][(modrm & 0x38) >> 3]; /* Put something unique in opcode */ opcode = ((opcode << 8)|((modrm & 0x38) >> 3)); } else if (op->Op1 == M_FLOAT) { int mod, rm, reg; mod = modrm >> 6; rm = modrm & 7; reg = (modrm >> 3) & 7; bzero(&tempop, sizeof(tempop)); if (mod != 3) { tempop.name = float_mem[(opcode - 0xd8) * 8 + reg]; tempop.Op1 = M_E; tempop.opdata1 = T_v; op = &tempop; } else { op = &float_grps[opcode - 0xd8][reg]; if (op->Op1 == M_FGRP) { tempop.name = fgrps[op->opdata1][rm]; /* instruction fnstsw is only one with * strange arg */ if ((opcode == 0xdf) && (*codeptr == 0xe0)) { irp->operand[1].op_type = O_REG; irp->operand[1].op_reg = R_eAX; } op = &tempop; } } } irp->opcodep = op; irp->opcode = opcode; irp->modrm = modrm; irp->prefixes = prefixes; /* Check to see if this is a bad instruction (per a table entry) */ if (op->opdata1 == T_BAD) { /* Back off the modrm if we grabbed one and return * from here. */ if (modrm) { codeptr--; size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } } /* Check to see if there is an SIB byte. 
*/ if (((modrm & 0xc0) != 0xc0) && ((modrm & 7) == 4)) { /* There is an SIB byte */ irp->sib = *codeptr++; irp->have_sib = 1; } /* Gather information on operands */ if (op->Op1 && (op->Op1 != M_BAD)) { get_operand_info(0, irp); } if (op->Op2 && (op->Op2 != M_BAD)) { get_operand_info(1, irp); } if (op->Op3 && (op->Op3 != M_BAD)) { get_operand_info(2, irp); } /* Determine total instruction size and adjust instrbuf ptr */ size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } static int seg_prefix(int prefixes) { if (prefixes & PREFIX_CS) { return(R_CS); } else if (prefixes & PREFIX_DS) { return(R_DS); } else if (prefixes & PREFIX_SS) { return(R_SS); } else if (prefixes & PREFIX_ES) { return(R_ES); } else if (prefixes & PREFIX_FS) { return(R_FS); } else if (prefixes & PREFIX_GS) { return(R_GS); } return(0); } #ifdef NOT_USED static void print_seg_prefix(instr_rec_t *irp, FILE *ofp) { if (irp->prefixes & PREFIX_CS) { fprintf(ofp, "%%cs:"); } if (irp->prefixes & PREFIX_DS) { fprintf(ofp, "%%ds:"); } if (irp->prefixes & PREFIX_SS) { fprintf(ofp, "%%ss:"); } if (irp->prefixes & PREFIX_ES) { fprintf(ofp, "%%es:"); } if (irp->prefixes & PREFIX_FS) { fprintf(ofp, "%%fs:"); } if (irp->prefixes & PREFIX_GS) { fprintf(ofp, "%%gs:"); } } #endif #ifndef REDHAT static int print_prefixes(instr_rec_t *irp, FILE *ofp) { int cnt = 0; if (irp->prefixes & PREFIX_REPZ) { fprintf(ofp, "repz "); cnt++; } if (irp->prefixes & PREFIX_REPNZ) { fprintf(ofp, "repnz "); cnt++; } if (irp->prefixes & PREFIX_LOCK) { fprintf(ofp, "lock "); cnt++; } if (irp->prefixes & PREFIX_ADR) { if (irp->aflag) { fprintf(ofp, "addr32 "); } else { fprintf(ofp, "addr16 "); } cnt++; } return(cnt); } static void print_sib_value(int opnum, instr_rec_t *irp, FILE *ofp) { if (irp->operand[opnum].op_type & O_REG) { if (irp->operand[opnum].op_type & O_BASE) { fprintf(ofp, "(%s)", reg_name[irp->operand[opnum].op_reg]); } else { fprintf(ofp, "%s", 
reg_name[irp->operand[opnum].op_reg]); } return; } else if (irp->operand[opnum].op_type & O_IMMEDIATE) { fprintf(ofp, "$0x%x", irp->operand[opnum].op_addr); return; } fprintf(ofp, "("); if (irp->operand[opnum].op_type & O_BASE) { fprintf(ofp, "%s,", reg_name[irp->operand[opnum].op_base]); } else { fprintf(ofp, ","); } if (irp->operand[opnum].op_type & O_INDEX) { fprintf(ofp, "%s,", reg_name[irp->operand[opnum].op_index]); } fprintf(ofp, "%d)", (1 << irp->operand[opnum].op_scale)); } static void print_opvalue(int opnum, instr_rec_t *irp, FILE *ofp) { if (irp->operand[opnum].op_type & O_REG) { if (irp->operand[opnum].op_type & (O_BASE|O_DISP)) { fprintf(ofp, "(%s)", reg_name[irp->operand[opnum].op_reg]); } else { fprintf(ofp, "%s", reg_name[irp->operand[opnum].op_reg]); } } else if (irp->operand[opnum].op_type & O_IMMEDIATE) { fprintf(ofp, "$0x%x", irp->operand[opnum].op_addr); } else if (irp->operand[opnum].op_type & O_ADDR) { /* jump or call address */ printaddr(irp->operand[opnum].op_addr, 0, ofp); } else if (irp->operand[opnum].op_type & O_OFF) { fprintf(ofp, "0x%x", irp->operand[opnum].op_addr); } } int print_instr(kaddr_t pc, FILE *ofp, int flag) { int p = 0, i, j, size, print_comma = 0; instr_rec_t irp; opcode_rec_t *op; bzero(&irp, sizeof(irp)); /* XXX -- For now, make aflag and dflag equal to one. Should get * this from some sort of configuration struct (set via * initialization) */ irp.aflag = 1; irp.dflag = 1; size = get_instr_info(pc, &irp); op = irp.opcodep; if (!op) { fprintf(ofp, "BAD INSTR (pc=0x%x)\n", pc); return(0); } printaddr(pc, 1, ofp); if (flag) { fprintf(ofp, "0x%04x ", irp.opcode); } if (irp.prefixes) { p = print_prefixes(&irp, ofp); } print_instrname(op->name, &irp, ofp); /* HACK! but necessary to match i386-dis.c output for fwait. 
*/ if (!strcmp(op->name, "fwait")) { fprintf(ofp, "\n"); return(irp.size); } if (p || (strlen(op->name) >= 7)) { fprintf(ofp, " "); } else { for (i = 0; i < (7 - strlen(op->name)); i++) { fprintf(ofp, " "); } } for (j = 0; j < 3; j++) { if (irp.opcode == 0xc8) { i = j; } else { i = 2 - j; } if(irp.operand[i].op_type) { if (print_comma) { fprintf(ofp, ","); } if (irp.operand[i].op_type & O_LPTR) { fprintf(ofp, "0x%x,0x%x", irp.operand[i].op_seg, irp.operand[i].op_addr); print_comma++; continue; } if (irp.operand[i].op_type & O_CR) { fprintf(ofp, "%%cr%d", irp.operand[i].op_reg); print_comma++; continue; } if (irp.operand[i].op_type & O_DB) { fprintf(ofp, "%%db%d", irp.operand[i].op_reg); print_comma++; continue; } if (irp.operand[i].op_type & O_SEG) { fprintf(ofp, "%s:(%s)", reg_name[irp.operand[i].op_seg], reg_name[irp.operand[i].op_reg]); print_comma++; continue; } if (irp.operand[i].op_type & O_INDIR) { fprintf(ofp, "*"); } if (irp.operand[i].op_type & O_DISP) { fprintf(ofp, "0x%x", irp.operand[i].op_disp); } if (irp.have_sib) { print_sib_value(i, &irp, ofp); } else { print_opvalue(i, &irp, ofp); } print_comma++; } } if (flag) { fprintf(ofp, " (%d %s)\n", irp.size, (irp.size > 1) ? 
"bytes" : "byte"); } else { fprintf(ofp, "\n"); } return(irp.size); } void list_instructions(FILE *ofp) { int i, j, print_comma = 0; fprintf(ofp, "ONE BYTE INSTRUCTIONS:\n\n"); for(i = 0; i < 256; i++) { fprintf(ofp, "0x%04x %s", i, op_386[i].name); for (j = 0; j < (10 - strlen(op_386[i].name)); j++) { fprintf(ofp, " "); } if (op_386[i].Op1) { print_optype(op_386[i].Op1, op_386[i].opdata1, ofp); print_comma++; } if (op_386[i].Op2) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386[i].Op2, op_386[i].opdata2, ofp); print_comma++; } if (op_386[i].Op3) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386[i].Op3, op_386[i].opdata3, ofp); } fprintf(ofp, "\n"); } fprintf(ofp, "\nTWO BYTE INSTRUCTIONS:\n\n"); for(i = 0; i < 256; i++) { fprintf(ofp, "0x0f%02x %s", i, op_386_twobyte[i].name); for (j = 0; j < (10 - strlen(op_386_twobyte[i].name)); j++) { fprintf(ofp, " "); } if (op_386_twobyte[i].Op1) { print_optype(op_386_twobyte[i].Op1, op_386_twobyte[i].opdata1, ofp); print_comma++; } if (op_386_twobyte[i].Op2) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386_twobyte[i].Op2, op_386_twobyte[i].opdata2, ofp); print_comma++; } if (op_386_twobyte[i].Op3) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386_twobyte[i].Op3, op_386_twobyte[i].opdata3, ofp); } fprintf(ofp, "\n"); } } #endif /* !REDHAT */ void free_instr_stream(instr_rec_t *irp) { instr_rec_t *ptr; if(irp) { while (irp->prev) { irp = irp->prev; } while (irp) { ptr = irp; irp = irp->next; kl_free_block(ptr); } } } instr_rec_t * get_instr_stream(kaddr_t pc, int bcount, int acount) { int size, count = 0; kaddr_t addr, start_addr, end_addr; syment_t *sp1, *sp2; #ifdef REDHAT syment_t *sp, *sp_next, *sp_next_next; ulong offset; #endif instr_rec_t *fst = (instr_rec_t *)NULL, *lst, *ptr, *cur; #ifdef REDHAT cur = NULL; if ((sp = x86_is_entry_tramp_address(pc, &offset))) pc = sp->value + offset; #endif if (!(sp1 = kl_lkup_symaddr(pc))) { return((instr_rec_t *)NULL); } start_addr = 
sp1->s_addr; if (pc <= (sp1->s_addr + (bcount * 15))) { if ((sp2 = kl_lkup_symaddr(sp1->s_addr - 4))) { start_addr = sp2->s_addr; } } #ifdef REDHAT sp_next = next_symbol(NULL, sp1); if (!sp_next) return((instr_rec_t *)NULL); sp_next_next = next_symbol(NULL, sp_next); if (pc > (sp_next->s_addr - (acount * 15))) { if (sp_next_next) { end_addr = sp_next_next->s_addr; } else { end_addr = sp_next->s_addr; } } else { end_addr = sp_next->s_addr; } #else if (pc > (sp1->s_next->s_addr - (acount * 15))) { if (sp1->s_next->s_next) { end_addr = sp1->s_next->s_next->s_addr; } else { end_addr = sp1->s_next->s_addr; } } else { end_addr = sp1->s_next->s_addr; } #endif addr = start_addr; while (addr <= pc) { if (addr >= end_addr) { /* We've gone too far (beyond the end of this * function) The pc most likely was not valid * (it pointed into the middle of an instruction). */ free_instr_stream(cur); return((instr_rec_t *)NULL); } if (count <= bcount) { /* Allocate another record */ cur = (instr_rec_t *) kl_alloc_block(sizeof(instr_rec_t), K_TEMP); count++; cur->aflag = cur->dflag = 1; if ((ptr = fst)) { while (ptr->next) { ptr = ptr->next; } ptr->next = cur; cur->prev = ptr; } else { fst = cur; } } else { /* Pull the last record to the front of the list */ ptr = fst; if (ptr->next) { fst = ptr->next; fst->prev = (instr_rec_t *)NULL; cur->next = ptr; } bzero(ptr, sizeof(*ptr)); ptr->aflag = ptr->dflag = 1; if (ptr != fst) { ptr->prev = cur; } cur = ptr; } size = get_instr_info(addr, cur); if (size == 0) { free_instr_stream(cur); return((instr_rec_t *)NULL); } addr += size; } if (acount) { lst = cur; for (count = 0; count < acount; count++) { ptr = (instr_rec_t *) kl_alloc_block(sizeof(instr_rec_t), K_TEMP); ptr->aflag = ptr->dflag = 1; size = get_instr_info(addr, ptr); if (size == 0) { kl_free_block(ptr); return(cur); } lst->next = ptr; ptr->prev = lst; lst = ptr; addr += size; } } return(cur); } #ifndef REDHAT /* * print_instr_stream() */ kaddr_t print_instr_stream(kaddr_t value, int 
bcount, int acount, int flags, FILE *ofp) { kaddr_t v = value; instr_rec_t *cur_irp, *irp; if ((cur_irp = get_instr_stream(v, bcount, acount))) { irp = cur_irp; /* Walk back to the start of the stream and then * print out all instructions in the stream. */ while (irp->prev) { irp = irp->prev; } while (irp) { if (flags & C_FULL) { print_instr(irp->addr, ofp, 1); } else { print_instr(irp->addr, ofp, 0); } if (irp->addr >= value) { v += irp->size; } irp = irp->next; } free_instr_stream(cur_irp); } return(v); } /* * dump_instr() -- architecture specific instruction dump routine */ void dump_instr(kaddr_t addr, uint64_t count, int flags, FILE *ofp) { fprintf(ofp, "This operation not supported for i386 architecture.\n"); } #endif /* !REDHAT */ /* * lkcdutils-4.1/libutil/kl_queue.c */ /* * Copyright 2002 Silicon Graphics, Inc. All rights reserved. */ #ifndef REDHAT #include #endif /* * kl_enqueue() -- Add a new element to the tail of doubly linked list. */ void kl_enqueue(element_t **list, element_t *new) { element_t *head; /* * If there aren't any elements on the list, then make new element the * head of the list and make it point to itself (next and prev). */ if (!(head = *list)) { new->next = new; new->prev = new; *list = new; } else { head->prev->next = new; new->prev = head->prev; new->next = head; head->prev = new; } } /* * kl_dequeue() -- Remove an element from the head of doubly linked list. 
*/
element_t *
kl_dequeue(element_t **list)
{
	element_t *head;

	/* If there's nothing queued up, just return */
	if (!*list) {
		return((element_t *)NULL);
	}
	head = *list;
	/* If there is only one element on list, just remove it */
	if (head->next == head) {
		*list = (element_t *)NULL;
	} else {
		head->next->prev = head->prev;
		head->prev->next = head->next;
		*list = head->next;
	}
	/* Detach the element; note that head->prev is left pointing
	 * into the list -- callers must not rely on it.
	 */
	head->next = 0;
	return(head);
}

#ifndef REDHAT
/*
 * kl_findqueue() -- Return 1 if item is an element of the circular
 * doubly linked list, 0 otherwise.
 */
int
kl_findqueue(element_t **list, element_t *item)
{
	element_t *e;

	/* If there's nothing queued up, just return */
	if (!*list) {
		return(0);
	}
	e = *list;
	/* Check to see if there is only one element on the list. */
	if (e->next == e) {
		if (e != item) {
			return(0);
		}
	} else {
		/* Now walk linked list looking for item */
		while(1) {
			if (e == item) {
				break;
			} else if (e->next == *list) {
				/* wrapped around: item not present */
				return(0);
			}
			e = e->next;
		}
	}
	return(1);
}

/*
 * kl_findlist_queue() -- Like kl_findqueue(), but matches by content:
 * compare() is expected to return 0 when two elements match (memcmp
 * convention).  Returns 1 if a matching element is found, 0 otherwise.
 */
int
kl_findlist_queue(list_of_ptrs_t **list, list_of_ptrs_t *item,
		int (*compare)(void *,void *))
{
	list_of_ptrs_t *e;

	/* If there's nothing queued up, just return */
	if (!*list) {
		return(0);
	}
	e = *list;
	/* Check to see if there is only one element on the list. */
	if (((element_t *)e)->next == (element_t *)e) {
		/* nonzero compare() means the single element differs */
		if (compare(e,item)) {
			return(0);
		}
	} else {
		/* Now walk linked list looking for item */
		while(1) {
			if (!compare(e,item)) {
				break;
			} else if (((element_t *)e)->next ==
					(element_t *)*list) {
				/* wrapped around: no match found */
				return(0);
			}
			e = (list_of_ptrs_t *)((element_t *)e)->next;
		}
	}
	return(1);
}

/*
 * kl_remqueue() -- Remove specified element from doubly linked list.
*/ void kl_remqueue(element_t **list, element_t *item) { /* Check to see if item is first on the list */ if (*list == item) { if (item->next == item) { *list = (element_t *)NULL; return; } else { *list = item->next; } } /* Remove item from list */ item->next->prev = item->prev; item->prev->next = item->next; } #endif /* !REDHAT */ #endif /* X86 */ crash-7.1.4/memory_driver/0000755000000000000000000000000012634305150014203 5ustar rootrootcrash-7.1.4/memory_driver/Makefile0000664000000000000000000000115212634305150015644 0ustar rootroot# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # obj-m := crash.o all: make -C /lib/modules/`uname -r`/build SUBDIRS=${PWD} modules clean: rm -f *.mod.c *.ko *.o Module.* crash-7.1.4/memory_driver/crash.c0000664000000000000000000000755312634305150015463 0ustar rootroot/* * linux/drivers/char/crash.c * * Copyright (C) 2004, 2011 Dave Anderson * Copyright (C) 2004, 2011 Red Hat, Inc. */ /****************************************************************************** * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ #include #include #include #include #include #include #include #include #include #include extern int page_is_ram(unsigned long); static inline void * map_virtual(u64 offset, struct page **pp) { struct page *page; unsigned long pfn; void *vaddr; pfn = (unsigned long)(offset >> PAGE_SHIFT); #ifdef NOTDEF /* * page_is_ram() is typically not exported, but there may * be another architecture, kernel version, or distribution * specific mechanism that can be plugged in here if desired. */ if (!page_is_ram(pfn)) { printk(KERN_INFO "crash memory driver: !page_is_ram(pfn: %lx)\n", pfn); return NULL; } #endif if (!pfn_valid(pfn)) { printk(KERN_INFO "crash memory driver: invalid pfn: %lx\n", pfn); return NULL; } page = pfn_to_page(pfn); vaddr = kmap(page); if (!vaddr) { printk(KERN_INFO "crash memory driver: pfn: %lx kmap(page: %lx) failed\n", pfn, (unsigned long)page); return NULL; } *pp = page; return (vaddr + (offset & (PAGE_SIZE-1))); } static inline void unmap_virtual(struct page *page) { kunmap(page); } #define CRASH_VERSION "1.1" /* * These are the file operation functions that allow crash utility * access to physical memory. */ static loff_t crash_llseek(struct file * file, loff_t offset, int orig) { switch (orig) { case 0: file->f_pos = offset; return file->f_pos; case 1: file->f_pos += offset; return file->f_pos; default: return -EINVAL; } } /* * Determine the page address for an address offset value, * get a virtual address for it, and copy it out. * Accesses must fit within a page. 
*/ static ssize_t crash_read(struct file *file, char *buf, size_t count, loff_t *poff) { void *vaddr; struct page *page; u64 offset; ssize_t read; offset = *poff; if (offset >> PAGE_SHIFT != (offset+count-1) >> PAGE_SHIFT) return -EINVAL; vaddr = map_virtual(offset, &page); if (!vaddr) return -EFAULT; if (copy_to_user(buf, vaddr, count)) { unmap_virtual(page); return -EFAULT; } unmap_virtual(page); read = count; *poff += read; return read; } static struct file_operations crash_fops = { .owner = THIS_MODULE, .llseek = crash_llseek, .read = crash_read, }; static struct miscdevice crash_dev = { MISC_DYNAMIC_MINOR, "crash", &crash_fops }; static int __init crash_init(void) { int ret; ret = misc_register(&crash_dev); if (ret) { printk(KERN_ERR "crash memory driver: cannot misc_register (MISC_DYNAMIC_MINOR)\n"); goto out; } ret = 0; printk(KERN_INFO "crash memory driver: version %s\n", CRASH_VERSION); out: return ret; } static void __exit crash_cleanup_module(void) { misc_deregister(&crash_dev); } module_init(crash_init); module_exit(crash_cleanup_module); MODULE_LICENSE("GPL"); crash-7.1.4/memory_driver/README0000664000000000000000000000131312634305150015063 0ustar rootrootFor live system analysis, the physical memory source must be one of the following devices: /dev/mem /proc/kcore /dev/crash If the live system kernel was configured with CONFIG_STRICT_DEVMEM, then /dev/mem cannot be used. If the live system kernel was configured without CONFIG_PROC_KCORE, or if /proc/kcore is non-functional, then /proc/kcore cannot be used. The third alternative is this /dev/crash driver. Presuming that /lib/modules/`uname -r`/build points to a kernel build tree or kernel "devel" package tree, the module can simply be built and installed like so: # make ... # insmod crash.ko Once installed, the /dev/crash driver will be used by default for live system crash sessions. 
crash-7.1.4/extensions/0000755000000000000000000000000012634305150013517 5ustar rootrootcrash-7.1.4/extensions/Makefile0000664000000000000000000000377512634305150015175 0ustar rootroot# # Makefile for building crash shared object extensions # # Copyright (C) 2005, 2007, 2009, 2011, 2013 David Anderson # Copyright (C) 2005, 2007, 2009, 2011, 2013 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # To build the extension shared objects in this directory, run # "make extensions" from the top-level directory. # # To add a new extension object, simply copy your module's .c file # to this directory, and it will be built automatically using # the "standard" compile line. If that compile line does not # suffice, create a .mk file with the same prefix as the .c file, # and that makefile will be invoked. # CONTRIB_SO := $(patsubst %.c,%.so,$(wildcard *.c)) all: link_defs $(CONTRIB_SO) link_defs: @rm -f defs.h @ln ../defs.h $(CONTRIB_SO): %.so: %.c defs.h @if [ -f $*.mk ]; then \ make -f $*.mk; \ else \ grep '((constructor))' $*.c > .constructor; \ if [ -s .constructor ]; then \ echo "gcc -Wall -g -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS)"; \ gcc -Wall -g -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS); \ fi; \ if [ ! 
-s .constructor ]; then \ echo "gcc -Wall -g -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS)"; \ gcc -Wall -g -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS); \ fi; \ rm -f .constructor; \ fi clean: rm -f $(CONTRIB_SO) @for MAKEFILE in `grep -sl "^clean:" *.mk`; \ do make --no-print-directory -f $$MAKEFILE clean; \ done crash-7.1.4/extensions/echo.c0000664000000000000000000000621712634305150014611 0ustar rootroot/* echo.c - simple example of a crash extension * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2005, 2007, 2013 David Anderson * Copyright (C) 2002-2005, 2007, 2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* From the crash source top-level directory */ void echo_init(void); /* constructor function */ void echo_fini(void); /* destructor function (optional) */ void cmd_echo(void); /* Declare the commands and their help data. */ char *help_echo[]; static struct command_table_entry command_table[] = { { "echo", cmd_echo, help_echo, 0}, /* One or more commands, */ { NULL }, /* terminated by NULL, */ }; void __attribute__((constructor)) echo_init(void) /* Register the command set. */ { register_extension(command_table); } /* * This function is called if the shared object is unloaded. * If desired, perform any cleanups here. 
*/ void __attribute__((destructor)) echo_fini(void) { } /* * Arguments are passed to the command functions in the global args[argcnt] * array. See getopt(3) for info on dash arguments. Check out defs.h and * other crash commands for usage of the myriad of utility routines available * to accomplish what your task. */ void cmd_echo(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) fprintf(fp, "%s ", args[optind++]); fprintf(fp, "\n"); } /* * The optional help data is simply an array of strings in a defined format. * For example, the "help echo" command will use the help_echo[] string * array below to create a help page that looks like this: * * NAME * echo - echoes back its arguments * * SYNOPSIS * echo arg ... * * DESCRIPTION * This command simply echoes back its arguments. * * EXAMPLE * Echo back all command arguments: * * crash> echo hello, world * hello, world * */ char *help_echo[] = { "echo", /* command name */ "echoes back its arguments", /* short description */ "arg ...", /* argument synopsis, or " " if none */ " This command simply echoes back its arguments.", "\nEXAMPLE", " Echo back all command arguments:\n", " crash> echo hello, world", " hello, world", NULL }; crash-7.1.4/extensions/eppic.mk0000664000000000000000000000452312634305150015156 0ustar rootroot# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
TARGET_FLAGS = -D$(TARGET) ifeq ($(TARGET), PPC64) TARGET_FLAGS += -m64 endif ifeq ($(TARGET), ARM) TARGET_FLAGS += -m32 endif ifeq ($(TARGET), MIPS) TARGET_FLAGS += -m32 endif ifeq ($(TARGET), X86) TARGET_FLAGS += -m32 endif APPFILE=eppic/applications/crash/eppic.c GITHUB := $(shell ping -c 1 github.com | grep "1 received") GIT := $(shell which git 2> /dev/null) all: @if [ -f /usr/bin/flex ] && [ -f /usr/bin/bison ]; then \ if [ -f ../$(GDB)/crash.target ]; \ then \ if [ ! -f $(APPFILE) ]; \ then \ if [ -f "$(GIT)" ]; \ then \ if [ -n "$(EPPIC_GIT_URL)" ]; then \ git clone "$(EPPIC_GIT_URL)" eppic; \ else \ if [ -n "$(GITHUB)" ] ; then \ git clone https://github.com/lucchouina/eppic.git eppic; \ fi; \ fi; \ else \ if [ ! -f "$(GIT)" ]; then \ echo "eppic.so: git command is needed for pulling eppic extension code"; \ fi; \ fi; \ fi; \ if [ -f $(APPFILE) ]; \ then \ make -f eppic.mk eppic.so; \ else \ echo "eppic.so: failed to pull eppic code from git repo"; \ fi; \ else \ echo "eppic.so: build failed: requires the crash $(GDB) module"; \ fi ;\ else \ echo "eppic.so: build failed: requires /usr/bin/flex and /usr/bin/bison"; \ fi lib-eppic: cd eppic/libeppic && make eppic.so: ../defs.h $(APPFILE) lib-eppic gcc -g -Ieppic/libeppic -I../$(GDB)/gdb -I../$(GDB)/bfd -I../$(GDB)/include -I../$(GDB)/gdb/config -I../$(GDB)/gdb/common -I../$(GDB) -nostartfiles -shared -rdynamic -o eppic.so $(APPFILE) -fPIC $(TARGET_FLAGS) $(GDB_FLAGS) -Leppic/libeppic -leppic clean: if [ -d eppic/libeppic ]; \ then \ cd eppic/libeppic && make -i clean; \ fi rm -f eppic.so crash-7.1.4/extensions/snap.c0000664000000000000000000004714012634305150014634 0ustar rootroot/* snap.c - capture live memory into a kdump or netdump dumpfile * * Copyright (C) 2009, 2013 David Anderson * Copyright (C) 2009, 2013 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include void snap_init(void); void snap_fini(void); void cmd_snap(void); char *help_snap[]; static struct command_table_entry command_table[] = { { "snap", cmd_snap, help_snap, 0 }, { NULL } }; static char *generate_elf_header(int, int, char *); static int verify_paddr(physaddr_t); static void init_ram_segments(void); static int print_progress(const char *, ulong); #if defined(X86) || defined(X86_64) || defined(IA64) || defined(PPC64) || defined(ARM64) int supported = TRUE; #else int supported = FALSE; #endif void __attribute__((constructor)) snap_init(void) /* Register the command set. */ { register_extension(command_table); } void __attribute__((destructor)) snap_fini(void) { } /* * Just pass in an unused filename. 
*/ void cmd_snap(void) { int c, fd, n; physaddr_t paddr; size_t offset; char *buf; char *filename; struct node_table *nt; int type; char *elf_header; Elf64_Phdr *load; int load_index; if (!supported) error(FATAL, "command not supported on the %s architecture\n", pc->machine_type); filename = NULL; buf = GETBUF(PAGESIZE()); type = KDUMP_ELF64; while ((c = getopt(argcnt, args, "n")) != EOF) { switch(c) { case 'n': if (machine_type("X86_64")) option_not_supported('n'); else type = NETDUMP_ELF64; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { if (filename) cmd_usage(pc->curcmd, SYNOPSIS); if (file_exists(args[optind], NULL)) error(FATAL, "%s: file already exists\n", args[optind]); else if ((fd = open(args[optind], O_RDWR|O_CREAT, 0644)) < 0) error(FATAL, args[optind]); filename = args[optind]; optind++; } if (!filename) cmd_usage(pc->curcmd, SYNOPSIS); init_ram_segments(); if (!(elf_header = generate_elf_header(type, fd, filename))) error(FATAL, "cannot generate ELF header\n"); load = (Elf64_Phdr *)(elf_header + sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr)); load_index = machine_type("X86_64") || machine_type("IA64") ? 
1 : 0; for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; paddr = nt->start_paddr; offset = load[load_index + n].p_offset; for (c = 0; c < nt->size; c++, paddr += PAGESIZE()) { if (!verify_paddr(paddr)) continue; if (!readmem(paddr, PHYSADDR, &buf[0], PAGESIZE(), "memory page", QUIET|RETURN_ON_ERROR)) continue; lseek(fd, (off_t)(paddr + offset - nt->start_paddr), SEEK_SET); if (write(fd, &buf[0], PAGESIZE()) != PAGESIZE()) error(FATAL, "write to dumpfile failed\n"); if (!print_progress(filename, BTOP(paddr))) return; } } fprintf(stderr, "\r%s: [100%%] ", filename); fprintf(fp, "\n"); sprintf(buf, "/bin/ls -l %s\n", filename); system(buf); FREEBUF(elf_header); FREEBUF(buf); } char *help_snap[] = { "snap", /* command name */ "take a memory snapshot", /* short description */ "[-n] dumpfile", /* filename */ " This command takes a snapshot of physical memory and creates an ELF vmcore.", " The default vmcore is a kdump-style dumpfile. Supported on x86, x86_64,", " ia64 and ppc64 architectures only.", " ", " -n create a netdump-style vmcore (n/a on x86_64).", NULL }; /* * Architecture-specific and -generic ELF header data borrowed from the * netdump.h file in the netdump package, modified slightly to also create * a kdump-style vmcore. 
*/ /****************************************************************************** * Elf core dumping * ******************************************************************************/ /* * Host-platform independent data */ #define ELF_PRARGSZ (80) /* Number of chars for args */ struct elf_prpsinfo_64 { char pr_state; /* numeric process state */ char pr_sname; /* char for pr_state */ char pr_zomb; /* zombie */ char pr_nice; /* nice val */ __u64 pr_flag; /* flags */ __u32 pr_uid; __u32 pr_gid; __u32 pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; /* * i386 specific */ struct user_regs_struct_i386 { __u32 ebx, ecx, edx, esi, edi, ebp, eax; __u16 ds, __ds, es, __es; __u16 fs, __fs, gs, __gs; __u32 orig_eax, eip; __u16 cs, __cs; __u32 eflags, esp; __u16 ss, __ss; }; #define ELF_NGREG_I386 (sizeof (struct user_regs_struct_i386) / sizeof(__u32)) typedef __u32 elf_gregset_i386_t[ELF_NGREG_I386]; struct elf_prstatus_i386 { char pad[72]; elf_gregset_i386_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. */ }; /* * x86_64 specific */ struct user_regs_struct_x86_64 { __u64 r15,r14,r13,r12,rbp,rbx,r11,r10; __u64 r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; __u64 rip,cs,eflags; __u64 rsp,ss; __u64 fs_base, gs_base; __u64 ds,es,fs,gs; }; #define ELF_NGREG_X86_64 (sizeof (struct user_regs_struct_x86_64) / sizeof(__u64)) typedef __u64 elf_gregset_x86_64_t[ELF_NGREG_X86_64]; struct elf_prstatus_x86_64 { char pad[112]; elf_gregset_x86_64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. 
*/ }; /* * ppc64 specific */ struct user_regs_struct_ppc64 { __u64 gpr[32]; __u64 nip; __u64 msr; __u64 orig_gpr3; __u64 ctr; __u64 link; __u64 xer; __u64 ccr; __u64 softe; __u64 trap; __u64 dar; __u64 dsisr; __u64 result; }; #define ELF_NGREG_PPC64 (sizeof (struct user_regs_struct_ppc64) / sizeof(__u64)) typedef __u64 elf_gregset_ppc64_t[ELF_NGREG_PPC64]; struct elf_prstatus_ppc64 { char pad[112]; elf_gregset_ppc64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. */ }; /* * ia64 specific */ struct _ia64_fpreg { union { __u64 bits[2]; } u; } __attribute__ ((aligned (16))); struct user_regs_struct_ia64 { /* The following registers are saved by SAVE_MIN: */ __u64 b6; /* scratch */ __u64 b7; /* scratch */ __u64 ar_csd; /* used by cmp8xchg16 (scratch) */ __u64 ar_ssd; /* reserved for future use (scratch) */ __u64 r8; /* scratch (return value register 0) */ __u64 r9; /* scratch (return value register 1) */ __u64 r10; /* scratch (return value register 2) */ __u64 r11; /* scratch (return value register 3) */ __u64 cr_ipsr; /* interrupted task's psr */ __u64 cr_iip; /* interrupted task's instruction pointer */ __u64 cr_ifs; /* interrupted task's function state */ __u64 ar_unat; /* interrupted task's NaT register (preserved) */ __u64 ar_pfs; /* prev function state */ __u64 ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ __u64 ar_rnat; /* RSE NaT */ __u64 ar_bspstore; /* RSE bspstore */ __u64 pr; /* 64 predicate registers (1 bit each) */ __u64 b0; /* return pointer (bp) */ __u64 loadrs; /* size of dirty partition << 16 */ __u64 r1; /* the gp pointer */ __u64 r12; /* interrupted task's memory stack pointer */ __u64 r13; /* thread pointer */ __u64 ar_fpsr; /* floating point status (preserved) */ __u64 r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ __u64 r14; /* scratch */ __u64 r2; /* scratch */ __u64 r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ __u64 r16; /* scratch */ __u64 r17; /* scratch */ __u64 r18; /* scratch */ __u64 r19; /* scratch */ __u64 r20; /* scratch */ __u64 r21; /* scratch */ __u64 r22; /* scratch */ __u64 r23; /* scratch */ __u64 r24; /* scratch */ __u64 r25; /* scratch */ __u64 r26; /* scratch */ __u64 r27; /* scratch */ __u64 r28; /* scratch */ __u64 r29; /* scratch */ __u64 r30; /* scratch */ __u64 r31; /* scratch */ __u64 ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct _ia64_fpreg f6; /* scratch */ struct _ia64_fpreg f7; /* scratch */ struct _ia64_fpreg f8; /* scratch */ struct _ia64_fpreg f9; /* scratch */ struct _ia64_fpreg f10; /* scratch */ struct _ia64_fpreg f11; /* scratch */ }; #define ELF_NGREG_IA64 (sizeof (struct user_regs_struct_ia64) / sizeof(__u64)) typedef __u64 elf_gregset_ia64_t[ELF_NGREG_IA64]; struct elf_prstatus_ia64 { char pad[112]; elf_gregset_ia64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. 
*/ }; /* * arm64 specific */ struct user_pt_regs_arm64 { __u64 regs[31]; __u64 sp; __u64 pc; __u64 pstate; }; #define ELF_NGREG_ARM64 (sizeof (struct user_pt_regs_arm64) / sizeof(elf_greg_t)) #ifndef elf_greg_t typedef unsigned long elf_greg_t; #endif typedef elf_greg_t elf_gregset_arm64_t[ELF_NGREG_ARM64]; struct elf_prstatus_arm64 { char pad[112]; elf_gregset_arm64_t pr_reg; int pr_fpvalid; }; union prstatus { struct elf_prstatus_i386 x86; struct elf_prstatus_x86_64 x86_64; struct elf_prstatus_ppc64 ppc64; struct elf_prstatus_ia64 ia64; struct elf_prstatus_arm64 arm64; }; static size_t dump_elf_note(char *buf, Elf64_Word type, char *name, char *desc, int d_len) { Elf64_Nhdr *note; size_t len; note = (Elf64_Nhdr *)buf; note->n_namesz = strlen(name); note->n_descsz = d_len; note->n_type = type; len = sizeof(Elf64_Nhdr); memcpy(buf + len, name, note->n_namesz); len = roundup(len + note->n_namesz, 4); memcpy(buf + len, desc, note->n_descsz); len = roundup(len + note->n_descsz, 4); return len; } char * generate_elf_header(int type, int fd, char *filename) { int i, n; char *buffer, *ptr; Elf64_Ehdr *elf; Elf64_Phdr *notes; Elf64_Phdr *load; size_t offset, len, l_offset; size_t data_offset; struct elf_prpsinfo_64 prpsinfo; union prstatus prstatus; int prstatus_len; ushort e_machine; int num_segments; struct node_table *nt; ulonglong task_struct; num_segments = vt->numnodes; if (machine_type("X86_64")) { e_machine = EM_X86_64; prstatus_len = sizeof(prstatus.x86_64); num_segments += 1; /* mapped kernel section for phys_base */ } else if (machine_type("X86")) { e_machine = EM_386; prstatus_len = sizeof(prstatus.x86); } else if (machine_type("IA64")) { e_machine = EM_IA_64; prstatus_len = sizeof(prstatus.ia64); num_segments += 1; /* mapped kernel section for phys_start */ } else if (machine_type("PPC64")) { e_machine = EM_PPC64; prstatus_len = sizeof(prstatus.ppc64); } else if (machine_type("ARM64")) { e_machine = EM_AARCH64; prstatus_len = sizeof(prstatus.arm64); } else 
return NULL; /* should be enought for the notes + roundup + two blocks */ buffer = (char *)GETBUF(sizeof(Elf64_Ehdr) + num_segments * sizeof(Elf64_Phdr) + PAGESIZE() * 2); offset = 0; ptr = buffer; /* Elf header */ elf = (Elf64_Ehdr *)ptr; memcpy(elf->e_ident, ELFMAG, SELFMAG); elf->e_ident[EI_CLASS] = ELFCLASS64; #if __BYTE_ORDER == __BIG_ENDIAN elf->e_ident[EI_DATA] = ELFDATA2MSB; #else elf->e_ident[EI_DATA] = ELFDATA2LSB; #endif elf->e_ident[EI_VERSION] = EV_CURRENT; elf->e_ident[EI_OSABI] = ELFOSABI_SYSV; elf->e_ident[EI_ABIVERSION] = 0; memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); elf->e_type = ET_CORE; elf->e_machine = e_machine; elf->e_version = EV_CURRENT; elf->e_entry = 0; elf->e_phoff = sizeof(Elf64_Ehdr); elf->e_shoff = 0; elf->e_flags = 0; elf->e_ehsize = sizeof(Elf64_Ehdr); elf->e_phentsize = sizeof(Elf64_Phdr); elf->e_phnum = 1 + num_segments; elf->e_shentsize = 0; elf->e_shnum = 0; elf->e_shstrndx = 0; offset += sizeof(Elf64_Ehdr); ptr += sizeof(Elf64_Ehdr); /* PT_NOTE */ notes = (Elf64_Phdr *)ptr; notes->p_type = PT_NOTE; notes->p_offset = 0; /* TO BE FILLED IN */ notes->p_vaddr = 0; notes->p_paddr = 0; notes->p_filesz = 0; /* TO BE FILLED IN */ notes->p_memsz = 0; notes->p_flags = 0; notes->p_align = 0; offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); /* PT_LOAD */ load = (Elf64_Phdr *)ptr; for (i = n = 0; i < num_segments; i++) { load[i].p_type = PT_LOAD; load[i].p_offset = 0; /* TO BE FILLED IN */ switch (e_machine) { case EM_X86_64: nt = &vt->node_table[n]; if (i == 0) { #ifdef X86_64 load[i].p_vaddr = __START_KERNEL_map; load[i].p_paddr = machdep->machspec->phys_base; #endif load[i].p_filesz = 0; load[i].p_memsz = load[i].p_filesz; } else { load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; n++; } load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = 0; break; case EM_386: nt = &vt->node_table[n++]; load[i].p_vaddr = 0; 
load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_IA_64: nt = &vt->node_table[n]; if (i == 0) { #ifdef IA64 load[i].p_vaddr = machdep->machspec->kernel_start; load[i].p_paddr = machdep->machspec->phys_start; #endif load[i].p_filesz = 0; load[i].p_memsz = load[i].p_filesz; } else { load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; n++; } load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_PPC64: nt = &vt->node_table[n++]; load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_AARCH64: nt = &vt->node_table[n++]; load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? 
PAGESIZE() : 0; break; } // l_offset += load[i].p_filesz; offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); } notes->p_offset = offset; /* NT_PRSTATUS note */ memset(&prstatus, 0, sizeof(prstatus)); len = dump_elf_note(ptr, NT_PRSTATUS, "CORE", (char *)&prstatus, prstatus_len); offset += len; ptr += len; notes->p_filesz += len; /* NT_PRPSINFO note */ memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo_64)); prpsinfo.pr_state = 0; prpsinfo.pr_sname = 'R'; prpsinfo.pr_zomb = 0; strcpy(prpsinfo.pr_fname, "vmlinux"); len = dump_elf_note(ptr, NT_PRPSINFO, "CORE", (char *)&prpsinfo, sizeof(prpsinfo)); offset += len; ptr += len; notes->p_filesz += len; /* NT_TASKSTRUCT note */ task_struct = CURRENT_TASK(); len = dump_elf_note (ptr, NT_TASKSTRUCT, "SNAP", (char *)&task_struct, sizeof(ulonglong)); offset += len; ptr += len; notes->p_filesz += len; if (type == NETDUMP_ELF64) offset = roundup (offset, PAGESIZE()); l_offset = offset; for (i = 0; i < num_segments; i++) { load[i].p_offset = l_offset; l_offset += load[i].p_filesz; } data_offset = offset; while (offset > 0) { len = write(fd, buffer + (data_offset - offset), offset); if (len < 0) { perror(filename); FREEBUF(buffer); return NULL; } offset -= len; } return buffer; } struct ram_segments { physaddr_t start; physaddr_t end; }; static struct ram_segments *ram_segments = NULL; static int nr_segments = 0; static void init_ram_segments(void) { int i, errflag; FILE *iomem; char buf[BUFSIZE], *p1, *p2; physaddr_t start, end; if ((iomem = fopen("/proc/iomem", "r")) == NULL) goto fail_iomem; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, "System RAM")) { console(buf); nr_segments++; } } if (!nr_segments) goto fail_iomem; ram_segments = (struct ram_segments *) GETBUF(sizeof(struct ram_segments) * nr_segments); rewind(iomem); i = 0; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, "System RAM")) { if (!(p1 = strstr(buf, ":"))) goto fail_iomem; *p1 = NULLCHAR; clean_line(buf); if (strstr(buf, " ")) goto fail_iomem; p1 
= buf; if (!(p2 = strstr(buf, "-"))) goto fail_iomem; *p2 = NULLCHAR; p2++; errflag = 0; start = htoll(p1, RETURN_ON_ERROR|QUIET, &errflag); end = htoll(p2, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) goto fail_iomem; ram_segments[i].start = PHYSPAGEBASE(start); if (PAGEOFFSET(start)) ram_segments[i].start += PAGESIZE(); ram_segments[i].end = PHYSPAGEBASE(end); if (PAGEOFFSET(end) == (PAGESIZE()-1)) ram_segments[i].end += PAGESIZE(); console("ram_segments[%d]: %016llx %016llx [%s-%s]\n", i, (ulonglong)ram_segments[i].start, (ulonglong)ram_segments[i].end, p1, p2); i++; } } fclose(iomem); return; fail_iomem: fclose(iomem); nr_segments = 0; if (ram_segments) FREEBUF(ram_segments); return; } static int verify_paddr(physaddr_t paddr) { int i, ok; if (!machdep->verify_paddr(paddr)) return FALSE; if (!nr_segments) return TRUE; for (i = ok = 0; i < nr_segments; i++) { if ((paddr >= ram_segments[i].start) && (paddr < ram_segments[i].end)) { ok++; break; } } /* * Pre-2.6.13 x86_64 /proc/iomem was restricted to 4GB, * so just accept it. */ if ((paddr >= 0x100000000ULL) && machine_type("X86_64") && (THIS_KERNEL_VERSION < LINUX(2,6,13))) ok++; if (!ok) { if (CRASHDEBUG(1)) console("reject: %llx\n", (ulonglong)paddr); return FALSE; } return TRUE; } /* * Borrowed from makedumpfile, prints a percentage-done value * once per second. 
*/ static int print_progress(const char *filename, ulong current) { int n, progress; time_t tm; struct node_table *nt; static time_t last_time = 0; static ulong total_pages = 0; static ulong written_pages = 0; if (!total_pages) { for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; total_pages += nt->size; } } if (received_SIGINT()) { fprintf(stderr, "\n\n"); return FALSE; } if (++written_pages < total_pages) { tm = time(NULL); if (tm - last_time < 1) return TRUE; last_time = tm; progress = written_pages * 100 / total_pages; } else progress = 100; fprintf(stderr, "\r%s: [%2d%%] ", filename, progress); return TRUE; } crash-7.1.4/extensions/dminfo.c0000664000000000000000000012307512634305150015151 0ustar rootroot/* dminfo.c - crash extension module for device-mapper analysis * * Copyright (C) 2005 NEC Corporation * Copyright (C) 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" /* From the crash source top-level directory */ void dminfo_init(void); void dminfo_fini(void); /* * Indices of size-offset array (Used by GET_xxx macros) * * DM__ */ enum { DM_hash_cell_name_list = 0, DM_hash_cell_name, DM_hash_cell_md, DM_mapped_device_disk, DM_mapped_device_map, DM_gendisk_major, DM_gendisk_first_minor, DM_gendisk_disk_name, DM_dm_table_num_targets, DM_dm_table_targets, DM_dm_table_devices, DM_dm_target_type, DM_dm_target_begin, DM_dm_target_len, DM_dm_target_private, DM_dm_dev_count, DM_dm_dev_bdev, DM_dm_dev_name, DM_dm_io_md, DM_dm_io_bio, DM_target_type_name, DM_target_io_io, DM_block_device_bd_disk, DM_bio_bi_private, DM_bio_list_head, DM_linear_c_dev, DM_linear_c_start, DM_multipath_hw_handler, DM_multipath_nr_priority_groups, DM_multipath_priority_groups, DM_multipath_nr_valid_paths, DM_multipath_current_pg, DM_multipath_queue_if_no_path, DM_multipath_queue_size, DM_hw_handler_type, DM_hw_handler_type_name, DM_priority_group_ps, DM_priority_group_pg_num, DM_priority_group_bypassed, DM_priority_group_nr_pgpaths, DM_priority_group_pgpaths, DM_path_selector_type, DM_path_selector_type_name, DM_pgpath_fail_count, DM_pgpath_path, DM_path_dev, DM_path_is_active, DM_mirror_set_rh, DM_mirror_set_reads, DM_mirror_set_writes, DM_mirror_set_in_sync, DM_mirror_set_nr_mirrors, DM_mirror_set_mirror, DM_region_hash_log, DM_region_hash_quiesced_regions, DM_region_hash_recovered_regions, DM_dirty_log_type, DM_dirty_log_type_name, DM_mirror_error_count, DM_mirror_dev, DM_mirror_offset, DM_crypt_config_dev, DM_crypt_config_iv_mode, DM_crypt_config_tfm, DM_crypt_config_key_size, DM_crypt_config_key, DM_crypto_tfm_crt_u, DM_crypto_tfm___crt_alg, DM_crypto_alg_cra_name, DM_cipher_tfm_cit_mode, DM_stripe_c_stripes, DM_stripe_c_chunk_mask, DM_stripe_c_stripe, DM_stripe_dev, DM_dm_snapshot_origin, DM_dm_snapshot_cow, DM_dm_snapshot_chunk_size, DM_dm_snapshot_valid, DM_dm_snapshot_type, NR_DMINFO_MEMBER_TABLE_ENTRY }; /* Size-offset array for 
structure's member */ static struct dminfo_member_entry { unsigned long offset; unsigned long size; } mbr_ary[NR_DMINFO_MEMBER_TABLE_ENTRY]; /* * Macros to retrieve data of given structure's member * * Macros except for the MSG assume 'struct s' is at 'addr' */ #define MSG(msg, s, m) msg ": " s "." m /* Initialize the size-offset array */ #define INIT_MBR_TABLE(s, m) \ do { \ if (!mbr_ary[DM_##s##_##m].size) { \ mbr_ary[DM_##s##_##m].offset = MEMBER_OFFSET("struct " #s, #m); \ mbr_ary[DM_##s##_##m].size = MEMBER_SIZE("struct " #s, #m); \ } \ } while (0) /* * Store the data of member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_VALUE(addr, s, m, ret) \ do { \ INIT_MBR_TABLE(s, m); \ if (sizeof(ret) < mbr_ary[DM_##s##_##m].size) \ fprintf(fp, "%s\n", \ MSG("ERROR: GET_VALUE size_check", #s, #m)); \ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &ret, \ mbr_ary[DM_##s##_##m].size, MSG("GET_VALUE", #s, #m), \ FAULT_ON_ERROR);\ } while (0) /* * Store the address of member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_ADDR(addr, s, m, ret) \ do { \ INIT_MBR_TABLE(s, m); \ ret = addr + mbr_ary[DM_##s##_##m].offset; \ } while (0) /* * Store the string data of member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_STR(addr, s, m, ret, len) \ do { \ INIT_MBR_TABLE(s, m); \ if (!read_string(addr + mbr_ary[DM_##s##_##m].offset, ret, len - 1)) \ fprintf(fp, "%s\n", MSG("ERROR: GET_STR", #s, #m)); \ } while (0) /* * Store the string data pointed by member m in ret. * Initialize the size-offset array for the member m if needed. 
*/ #define GET_PTR_STR(addr, s, m, ret, len) \ do { \ unsigned long tmp; \ INIT_MBR_TABLE(s, m); \ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &tmp, \ mbr_ary[DM_##s##_##m].size, MSG("GET_PTR_STR", #s, #m),\ FAULT_ON_ERROR);\ if (!read_string(tmp, ret, len - 1)) \ fprintf(fp, "%s\n", MSG("ERROR: GET_PTR_STR", #s, #m));\ } while (0) /* * Utility function/macro to walk the list */ static unsigned long get_next_from_list_head(unsigned long addr) { unsigned long ret; readmem(addr + OFFSET(list_head_next), KVADDR, &ret, sizeof(void *), MSG("get_next_from_list_head", "list_head", "next"), FAULT_ON_ERROR); return ret; } #define list_for_each(next, head, last) \ for (next = get_next_from_list_head(head), last = 0UL; \ next && next != head && next != last; \ last = next, next = get_next_from_list_head(next)) /* * device-mapper target analyzer * * device-mapper has various target driver: linear, mirror, multipath, etc. * Information specific to target is stored in its own way. * Target-specific analyzer is provided for each target driver for this reason. 
*/ static struct dminfo_target_analyzer { struct dminfo_target_analyzer *next; char *target_name; int (*ready) (void); /* returns true if analyzer is available */ void (*show_table) (unsigned long); /* display table info */ void (*show_status) (unsigned long); /* display status info */ void (*show_queue) (unsigned long); /* display queued I/O info */ } analyzers_head; static void dminfo_register_target_analyzer(struct dminfo_target_analyzer *ta) { ta->next = analyzers_head.next; analyzers_head.next = ta; } static struct dminfo_target_analyzer *find_target_analyzer(char *target_type) { struct dminfo_target_analyzer *ta; for (ta = analyzers_head.next; ta; ta = ta->next) if (!strcmp(ta->target_name, target_type)) return ta; return NULL; } /* * zero target */ static int zero_ready(void) { return 1; } static void zero_show_table(unsigned long target) { unsigned long long start, len; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); fprintf(fp, " begin:%llu len:%llu", start, len); } static void zero_show_status(unsigned long target) { /* zero target has no status */ fprintf(fp, " No status info"); } static void zero_show_queue(unsigned long target) { /* zero target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer zero_analyzer = { .target_name = "zero", .ready = zero_ready, .show_table = zero_show_table, .show_status = zero_show_status, .show_queue = zero_show_queue }; /* * error target */ static int error_ready(void) { return 1; } static void error_show_table(unsigned long target) { unsigned long long start, len; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); fprintf(fp, " begin:%llu len:%llu", start, len); } static void error_show_status(unsigned long target) { /* error target has no status */ fprintf(fp, " No status info"); } static void error_show_queue(unsigned long target) { /* error target has no 
queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer error_analyzer = { .target_name = "error", .ready = error_ready, .show_table = error_show_table, .show_status = error_show_status, .show_queue = error_show_queue }; /* * linear target */ static int linear_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct linear_c")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: linear_c"); return 0; } static void linear_show_table(unsigned long target) { unsigned long lc, dm_dev; unsigned long long start, len, offset; char devt[BUFSIZE]; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); GET_VALUE(target, dm_target, private, lc); GET_VALUE(lc, linear_c, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, devt, BUFSIZE); GET_VALUE(lc, linear_c, start, offset); fprintf(fp, " begin:%llu len:%llu dev:%s offset:%llu", start, len, devt, offset); } static void linear_show_status(unsigned long target) { /* linear target has no status */ fprintf(fp, " No status info"); } static void linear_show_queue(unsigned long target) { /* linear target has no I/O queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer linear_analyzer = { .target_name = "linear", .ready = linear_ready, .show_table = linear_show_table, .show_status = linear_show_status, .show_queue = linear_show_queue }; /* * mirror target */ static int mirror_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct mirror_set")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: mirror_set"); return 0; } static void mirror_show_table(unsigned long target) { unsigned int i, nr_mir; unsigned long ms, rh, log, log_type, mir_size, mir_head, mir, dm_dev; unsigned long long offset; char buf[BUFSIZE]; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the log-type name of the mirror_set 
*/ GET_ADDR(ms, mirror_set, rh, rh); GET_VALUE(rh, region_hash, log, log); GET_VALUE(log, dirty_log, type, log_type); GET_PTR_STR(log_type, dirty_log_type, name, buf, BUFSIZE); fprintf(fp, " log:%s", buf); /* * Display information for each mirror disks. * * mir_head = mirror_set.mirror. * This is the head of struct mirror array. */ fprintf(fp, " dev:"); mir_size = STRUCT_SIZE("struct mirror"); GET_ADDR(ms, mirror_set, mirror, mir_head); GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); for (i = 0; i < nr_mir; i++) { mir = mir_head + mir_size * i; /* Get next mirror */ /* Get the devt of the mirror disk */ GET_VALUE(mir, mirror, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the offset of the mirror disk */ GET_VALUE(mir, mirror, offset, offset); fprintf(fp, "%s(%llu)%s", buf, offset, i == nr_mir - 1 ? "" : ","); } if (i != nr_mir) fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); } static void mirror_show_status(unsigned long target) { unsigned int i, nr_mir, synced, nr_error; unsigned long ms, mir_size, mir_head, mir, dm_dev; char buf[BUFSIZE]; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the status info of the mirror_set */ GET_VALUE(ms, mirror_set, in_sync, synced); fprintf(fp, " in_sync:%d", synced); /* * Display information for each mirror disks. * * mir_head = mirror_set.mirror. * This is the head of struct mirror array. */ fprintf(fp, " dev:"); mir_size = STRUCT_SIZE("struct mirror"); GET_ADDR(ms, mirror_set, mirror, mir_head); GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); for (i = 0; i < nr_mir; i++) { mir = mir_head + mir_size * i; /* Get next mirror */ /* Get the devt of the mirror disk */ GET_VALUE(mir, mirror, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the offset of the mirror disk */ GET_VALUE(mir, mirror, error_count, nr_error); fprintf(fp, "%s(%c,%d)%s", buf, nr_error ? 'D' : 'A', nr_error, i == nr_mir - 1 ? 
"" : ","); } if (i != nr_mir) fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); } static void mirror_show_queue(unsigned long target) { unsigned long ms, rlist, wlist, rhead, whead; unsigned long rh, quis_head, rcov_head, quis_next, rcov_next; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the address of queued I/O lists in struct mirror_set */ GET_ADDR(ms, mirror_set, reads, rlist); GET_ADDR(ms, mirror_set, writes, wlist); /* Get the head of queued I/O lists */ GET_VALUE(rlist, bio_list, head, rhead); GET_VALUE(wlist, bio_list, head, whead); fprintf(fp, " %s", rhead ? "reads" : "(reads)"); fprintf(fp, " %s", whead ? "writes" : "(writes)"); /* Get the address of the struct region_hash */ GET_ADDR(ms, mirror_set, rh, rh); /* Get the address of recover region lists in struct region_hash */ GET_ADDR(rh, region_hash, quiesced_regions, quis_head); GET_ADDR(rh, region_hash, recovered_regions, rcov_head); /* Get the head of recover region lists */ quis_next = get_next_from_list_head(quis_head); rcov_next = get_next_from_list_head(rcov_head); fprintf(fp, " %s", quis_next != quis_head ? "quiesced" : "(quiesced)"); fprintf(fp, " %s", rcov_next != rcov_head ? 
"recovered" : "(recovered)"); } static struct dminfo_target_analyzer mirror_analyzer = { .target_name = "mirror", .ready = mirror_ready, .show_table = mirror_show_table, .show_status = mirror_show_status, .show_queue = mirror_show_queue }; /* * multipath target */ static int multipath_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct multipath")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: multipath"); return 0; } static void multipath_show_table(unsigned long target) { int i, j; unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; unsigned long mp, hwh, hwh_type, ps, ps_type, path, dm_dev; unsigned long pg_head, pg_next, pg_last; unsigned long path_head, path_next, path_last; char name[BUFSIZE]; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get features information */ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); /* Get the hardware-handler information */ GET_ADDR(mp, multipath, hw_handler, hwh); GET_VALUE(hwh, hw_handler, type, hwh_type); if (hwh_type) GET_PTR_STR(hwh_type, hw_handler_type, name, name, BUFSIZE); else strcpy(name, "none"); /* Get the number of priority groups */ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", queue_if_no_path, name, nr_pgs); /* Display information for each priority group */ fprintf(fp, " %-2s %-13s %-8s %s", "PG", "PATH_SELECTOR", "NR_PATHS", "PATHS"); GET_ADDR(mp, multipath, priority_groups, pg_head); i = 0; list_for_each (pg_next, pg_head, pg_last) { /* pg_next == struct priority_group */ /* Get the index of the priority group */ GET_VALUE(pg_next, priority_group, pg_num, pg_id); /* Get the name of path selector */ GET_ADDR(pg_next, priority_group, ps, ps); GET_VALUE(ps, path_selector, type, ps_type); GET_PTR_STR(ps_type, path_selector_type, name, name, BUFSIZE); /* Get the number of paths in the priority group */ GET_VALUE(pg_next, 
priority_group, nr_pgpaths, nr_paths); fprintf(fp, "\n %-2d %-13s %-8d ", pg_id, name, nr_paths); /* Display information for each path */ GET_ADDR(pg_next, priority_group, pgpaths, path_head); j = 0; list_for_each (path_next, path_head, path_last) { /* path_next == struct pgpath */ /* Get the devt of the pgpath */ GET_ADDR(path_next, pgpath, path, path); GET_VALUE(path, path, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, name, BUFSIZE); fprintf(fp, " %s", name); j++; } if (j != nr_paths) fprintf(fp, " ERROR: paths are less than nr_paths:%d", nr_paths); i++; } if (i != nr_pgs) fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); } static void multipath_show_status(unsigned long target) { int i, j; unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; unsigned int bypassed_pg, path_active, nr_fails; unsigned long mp, hwh, hwh_type, cur_pg, path, dm_dev; unsigned long pg_head, pg_next, pg_last; unsigned long path_head, path_next, path_last; char buf[BUFSIZE], path_status; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get features information */ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); /* Get the hardware-handler information */ GET_ADDR(mp, multipath, hw_handler, hwh); GET_VALUE(hwh, hw_handler, type, hwh_type); if (hwh_type) GET_PTR_STR(hwh_type, hw_handler_type, name, buf, BUFSIZE); else strcpy(buf, "none"); /* Get the number of priority groups */ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", queue_if_no_path, buf, nr_pgs); /* Display information for each priority group */ fprintf(fp, " %-2s %-9s %-8s %s", "PG", "PG_STATUS", "NR_PATHS", "PATHS"); GET_ADDR(mp, multipath, priority_groups, pg_head); i = 0; list_for_each (pg_next, pg_head, pg_last) { /* pg_next == struct priority_group */ /* Get the index of the priority group */ GET_VALUE(pg_next, priority_group, pg_num, pg_id); /* Get the status of the priority group */ GET_VALUE(pg_next, 
priority_group, bypassed, bypassed_pg); if (bypassed_pg) strcpy(buf, "disabled"); else { GET_VALUE(mp, multipath, current_pg, cur_pg); if (pg_next == cur_pg) strcpy(buf, "active"); else strcpy(buf, "enabled"); } /* Get the number of paths in the priority group */ GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths); fprintf(fp, "\n %-2d %-9s %-8d ", pg_id, buf, nr_paths); /* Display information for each path */ GET_ADDR(pg_next, priority_group, pgpaths, path_head); j = 0; list_for_each (path_next, path_head, path_last) { /* path_next == struct pgpath */ /* Get the devt of the pgpath */ GET_ADDR(path_next, pgpath, path, path); GET_VALUE(path, path, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the status of the path */ GET_VALUE(path, path, is_active, path_active); GET_VALUE(path_next, pgpath, fail_count, nr_fails); path_status = path_active ? 'A' : 'F'; fprintf(fp, " %s(%c,%u)", buf, path_status, nr_fails); j++; } if (j != nr_paths) fprintf(fp, " ERROR: paths are less than nr_paths:%d", nr_paths); i++; } if (i != nr_pgs) fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); } static void multipath_show_queue(unsigned long target) { unsigned int queue_size; unsigned long mp; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get the size of queued I/Os in this 'target' */ GET_VALUE(mp, multipath, queue_size, queue_size); fprintf(fp, " queue_size:%d", queue_size); } static struct dminfo_target_analyzer multipath_analyzer = { .target_name = "multipath", .ready = multipath_ready, .show_table = multipath_show_table, .show_status = multipath_show_status, .show_queue = multipath_show_queue }; /* * crypt target */ static int crypt_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct crypt_config")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: crypt_config"); return 0; } #define DMINFO_CRYPTO_TFM_MODE_ECB 0x00000001 #define 
DMINFO_CRYPTO_TFM_MODE_CBC 0x00000002 static void crypt_show_table(unsigned long target) { int i, cit_mode, key_size; unsigned long cc, tfm, crt_alg, cipher, iv_mode, dm_dev; char buf[BUFSIZE], *chainmode; /* Get the address of struct crypt_config */ GET_VALUE(target, dm_target, private, cc); /* Get the cipher name of the crypt_tfm */ GET_VALUE(cc, crypt_config, tfm, tfm); GET_VALUE(tfm, crypto_tfm, __crt_alg, crt_alg); GET_STR(crt_alg, crypto_alg, cra_name, buf, BUFSIZE); fprintf(fp, " type:%s", buf); /* Get the cit_mode of the crypt_tfm */ GET_ADDR(tfm, crypto_tfm, crt_u, cipher); GET_VALUE(cipher, cipher_tfm, cit_mode, cit_mode); if (MEMBER_EXISTS("struct crypt_config", "iv_mode")) { if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) chainmode = "cbc"; else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) chainmode = "ecb"; else chainmode = "unknown"; /* Get the iv_mode of the crypt_config */ GET_VALUE(cc, crypt_config, iv_mode, iv_mode); if (iv_mode) { GET_PTR_STR(cc, crypt_config, iv_mode, buf, BUFSIZE); fprintf(fp, "-%s-%s", chainmode, buf); } else fprintf(fp, "-%s", chainmode); } else { /* Compatibility mode for old dm-crypt cipher strings */ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) chainmode = "plain"; else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) chainmode = "ecb"; else chainmode = "unknown"; fprintf(fp, "-%s", chainmode); } /* Get the devt of the crypt_config */ GET_VALUE(cc, crypt_config, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, " dev:%s", buf); /* * Get the key of the crypt_config. 
*/ GET_VALUE(cc, crypt_config, key_size, key_size); GET_STR(cc, crypt_config, key, buf, MIN(key_size + 1, BUFSIZE)); fprintf(fp, " key:"); for (i = 0; i < key_size; i++) fprintf(fp, "%02x", (unsigned char)buf[i]); } static void crypt_show_status(unsigned long target) { /* crypt target has no status */ fprintf(fp, " No status info"); } static void crypt_show_queue(unsigned long target) { /* crypt target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer crypt_analyzer = { .target_name = "crypt", .ready = crypt_ready, .show_table = crypt_show_table, .show_status = crypt_show_status, .show_queue = crypt_show_queue }; /* * stripe target */ static int stripe_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct stripe_c")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: stripe_c"); return 0; } static void stripe_show_table(unsigned long target) { unsigned int i, n_stripe; unsigned long sc, stripe_size, s, head, dm_dev; unsigned long long mask; char buf[BUFSIZE]; /* Get the address of struct stripe_c */ GET_VALUE(target, dm_target, private, sc); /* Get the chunk_size of the stripe_c */ GET_VALUE(sc, stripe_c, chunk_mask, mask); fprintf(fp, " chunk_size:%llu", mask + 1); /* * Display the information of each stripe disks. * * head = stripe_c.stripe. * This is the head of struct stripe array. */ stripe_size = STRUCT_SIZE("struct stripe"); GET_ADDR(sc, stripe_c, stripe, head); GET_VALUE(sc, stripe_c, stripes, n_stripe); fprintf(fp, " dev:"); for (i = 0; i < n_stripe; i++) { s = head + stripe_size * i; /* Get next stripe */ /* Get the devt of the stripe disk */ GET_VALUE(s, stripe, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, "%s%s", buf, i == n_stripe - 1 ? 
"" : ","); } if (i != n_stripe) fprintf(fp, " ERROR: dev are less than n_stripe:%d", n_stripe); } static void stripe_show_status(unsigned long target) { /* stripe target has no status */ fprintf(fp, " No status info"); } static void stripe_show_queue(unsigned long target) { /* stripe target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer stripe_analyzer = { .target_name = "striped", .ready = stripe_ready, .show_table = stripe_show_table, .show_status = stripe_show_status, .show_queue = stripe_show_queue }; /* * snapshot target */ static int snapshot_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct dm_snapshot")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: dm_snapshot"); return 0; } static void snapshot_show_table(unsigned long target) { unsigned long snap, orig_dev, cow_dev; unsigned long long chunk_size; char orig_name[BUFSIZE], cow_name[BUFSIZE], type; /* Get the address of struct dm_snapshot */ GET_VALUE(target, dm_target, private, snap); /* Get snapshot parameters of the dm_snapshot */ GET_VALUE(snap, dm_snapshot, origin, orig_dev); GET_STR(orig_dev, dm_dev, name, orig_name, BUFSIZE); GET_VALUE(snap, dm_snapshot, cow, cow_dev); GET_STR(cow_dev, dm_dev, name, cow_name, BUFSIZE); GET_VALUE(snap, dm_snapshot, type, type); GET_VALUE(snap, dm_snapshot, chunk_size, chunk_size); fprintf(fp, " orig:%s cow:%s type:%c chunk_size:%llu", orig_name, cow_name, type, chunk_size); } static void snapshot_show_status(unsigned long target) { int valid; unsigned long snap; /* Get the address of struct dm_snapshot */ GET_VALUE(target, dm_target, private, snap); /* Get snapshot parameters of the dm_snapshot */ GET_VALUE(snap, dm_snapshot, valid, valid); fprintf(fp, " vaild:%d", valid); } static void snapshot_show_queue(unsigned long target) { fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer snapshot_analyzer = { .target_name = "snapshot", .ready = 
snapshot_ready, .show_table = snapshot_show_table, .show_status = snapshot_show_status, .show_queue = snapshot_show_queue }; /* * snapshot-origin target */ static int origin_ready(void) { return 1; } static void origin_show_table(unsigned long target) { unsigned long dm_dev; char buf[BUFSIZE]; /* Get the name of the struct dm_dev */ GET_VALUE(target, dm_target, private, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, " orig_dev:%s", buf); } static void origin_show_status(unsigned long target) { /* snapshot-origin target has no status */ fprintf(fp, " No status info"); } static void origin_show_queue(unsigned long target) { /* snapshot-origin target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer snapshot_origin_analyzer = { .target_name = "snapshot-origin", .ready = origin_ready, .show_table = origin_show_table, .show_status = origin_show_status, .show_queue = origin_show_queue }; /* * Core part of dminfo */ #define DMINFO_LIST 0 #define DMINFO_DEPS 1 #define DMINFO_TABLE 2 #define DMINFO_STATUS 3 #define DMINFO_QUEUE 4 static int dm_core_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct hash_cell")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: hash_cell\n"); return 0; } /* Display dependency information of the 'table' */ static void dminfo_show_deps(unsigned long table) { int major, minor, count; unsigned long head, next, last, dev, bdev; char buf[BUFSIZE]; /* head = dm_table.devices */ GET_ADDR(table, dm_table, devices, head); fprintf(fp, " %-3s %-3s %-16s %-5s %s\n", "MAJ", "MIN", "GENDISK", "COUNT", "DEVNAME"); list_for_each (next, head, last) { /* Get dependency information. 
(next == struct *dm_dev) */ GET_VALUE(next, dm_dev, count, count); GET_VALUE(next, dm_dev, bdev, bdev); GET_VALUE(bdev, block_device, bd_disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); fprintf(fp, " %-3d %-3d %-16lx %-5d %s\n", major, minor, dev, count, buf); } } /* * Display target specific information in the 'table', if the target * analyzer is registered and available. */ static void dminfo_show_details(unsigned long table, unsigned int num_targets, int info_type) { unsigned int i; unsigned long head, target_size, target, target_type; struct dminfo_target_analyzer *ta; char buf[BUFSIZE]; /* * head = dm_table.targets. * This is the head of struct dm_target array. */ GET_VALUE(table, dm_table, targets, head); target_size = STRUCT_SIZE("struct dm_target"); fprintf(fp, " %-16s %-11s %s\n", "TARGET", "TARGET_TYPE", "PRIVATE_DATA"); for (i = 0; i < num_targets; i++, fprintf(fp, "\n")) { target = head + target_size * i; /* Get next target */ /* Get target information */ GET_VALUE(target, dm_target, type, target_type); GET_PTR_STR(target_type, target_type, name, buf, BUFSIZE); fprintf(fp, " %-16lx %-11s", target, buf); if (!(ta = find_target_analyzer(buf)) || !ta->ready || !ta->ready()) continue; switch (info_type) { case DMINFO_TABLE: if (ta->show_table) ta->show_table(target); break; case DMINFO_STATUS: if (ta->show_status) ta->show_status(target); break; case DMINFO_QUEUE: if (ta->show_queue) ta->show_queue(target); break; default: break; } } if (i != num_targets) fprintf(fp, " ERROR: targets are less than num_targets:%d", num_targets); } /* * Display lists (and detail information if specified) of existing * dm devices. 
*/ static void dminfo_show_list(int additional_info) { int i, major, minor, array_len; unsigned int num_targets; unsigned long _name_buckets, head, next, last, md, dev, table; char buf[BUFSIZE]; _name_buckets = symbol_value("_name_buckets"); array_len = get_array_length("_name_buckets", NULL, 0); if (additional_info == DMINFO_LIST) fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", "MAJ", "MIN", "MAP_DEV", "DM_TABLE", "TARGETS", "MAPNAME"); for (i = 0; i < array_len; i++) { /* head = _name_buckets[i] */ head = _name_buckets + (i * SIZE(list_head)); list_for_each (next, head, last) { /* next == hash_cell */ /* Get device and table information */ GET_PTR_STR(next, hash_cell, name, buf, BUFSIZE); GET_VALUE(next, hash_cell, md, md); GET_VALUE(md, mapped_device, disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_VALUE(md, mapped_device, map, table); GET_VALUE(table, dm_table, num_targets, num_targets); if (additional_info != DMINFO_LIST) fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", "MAJ", "MIN", "MAP_DEV", "DM_TABLE", "TARGETS", "MAPNAME"); fprintf(fp, "%-3d %-3d %-16lx %-16lx %-7d %s\n", major, minor, md, table, num_targets, buf); switch(additional_info) { case DMINFO_DEPS: dminfo_show_deps(table); break; case DMINFO_TABLE: case DMINFO_STATUS: case DMINFO_QUEUE: dminfo_show_details(table, num_targets, additional_info); break; default: break; } if (additional_info != DMINFO_LIST) fprintf(fp, "\n"); } } } /* * Display the original bio information for the 'bio'. * If the 'bio' is for dm devices, the original bio information is pointed * by bio.bi_private as struct target_io. 
*/ static void dminfo_show_bio(unsigned long bio) { int major, minor; unsigned long target_io, dm_io, dm_bio, md, dev; char buf[BUFSIZE]; /* Get original bio and device information */ GET_VALUE(bio, bio, bi_private, target_io); GET_VALUE(target_io, target_io, io, dm_io); GET_VALUE(dm_io, dm_io, bio, dm_bio); GET_VALUE(dm_io, dm_io, md, md); GET_VALUE(md, mapped_device, disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); fprintf(fp, "%-16s %-3s %-3s %-16s %s\n", "DM_BIO_ADDRESS", "MAJ", "MIN", "MAP_DEV", "DEVNAME"); fprintf(fp, "%-16lx %-3d %-3d %-16lx %s\n", dm_bio, major, minor, md, buf); } static void cmd_dminfo(void) { int c, additional_info = DMINFO_LIST; unsigned long bio; if (!dm_core_ready()) return; /* Parse command line option */ while ((c = getopt(argcnt, args, "b:dlqst")) != EOF) { switch(c) { case 'b': bio = stol(optarg, FAULT_ON_ERROR, NULL); dminfo_show_bio(bio); return; case 'd': additional_info = DMINFO_DEPS; break; case 'l': additional_info = DMINFO_LIST; break; case 'q': additional_info = DMINFO_QUEUE; break; case 's': additional_info = DMINFO_STATUS; break; case 't': additional_info = DMINFO_TABLE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); dminfo_show_list(additional_info); } /* * dminfo help */ static char *help_dminfo[] = { "dminfo", /* command name */ "device mapper (dm) information", /* short description */ "[-b bio | -d | -l | -q | -s | -t]", /* argument synopsis */ " This command displays information about device-mapper mapped ", " devices (dm devices).", " If no argument is entered, displays lists of existing dm devices.", " It's same as -l option.", "", " -b bio displays the information of the dm device which the bio", " is submitted in. 
If the bio isn't for dm devices,", " results will be error.", " -d displays dependency information for existing dm devices.", " -l displays lists of existing dm devices.", " -q displays queued I/O information for each target of", " existing dm devices.", " -s displays status information for each target of existing", " dm devices.", " -t displays table information for each target of existing", " dm devices.", "", "EXAMPLE", " Display lists of dm devices. \"MAP_DEV\" is the address of the", " struct mapped_device. \"DM_TABLE\" is the address of the struct", " dm_table. \"TARGETS\" is the number of targets which are in", " the struct dm_table.", "", " %s> dminfo", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " 253 0 c4840380 c4841880 1 mp0", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " 253 4 d441e280 c919ed80 1 dummy1", " 253 3 f5dc4280 cba81d80 1 dummy0", " 253 2 f7c53180 c4866180 1 vg0-lv0", " 253 1 f746d280 f746cd80 1 mp0p1", "", " Display the dm device information which the bio is submitted in.", " The bio (ceacee80) is a clone of the bio (ceacee00) which is", " submitted in the dm-3 (dummy0). 
And the bio (ceacee00) is a clone", " of the bio (ceaced80) which is submitted in the dm-4 (dummy1), too.", " The bio (ceaced80) is the original bio.", "", " %s> dminfo -b ceacee80", " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", " ceacee00 253 3 f5dc4280 dm-3", " crash> dminfo -b ceacee00", " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", " ceaced80 253 4 d441e280 dm-4", " crash> dminfo -b ceaced80", " dminfo: invalid kernel virtual address: 64 type: \"GET_VALUE: dm_io.bio\"", "", " Display dependency information for each target.", " The vg0-snap0 depends on thd dm-6 (vg0-lv0-real) and the dm-7", " (vg0-snap0-cow)", "", " %s> dminfo -d", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " MAJ MIN GENDISK COUNT DEVNAME", " 253 7 c4866980 1 dm-7", " 253 6 f6a04280 1 dm-6", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " MAJ MIN GENDISK COUNT DEVNAME", " 8 0 f7f24c80 1 sda", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " MAJ MIN GENDISK COUNT DEVNAME", " 8 0 f7f24c80 1 sda", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 2 f7c53180 c4866180 1 vg0-lv0", " MAJ MIN GENDISK COUNT DEVNAME", " 253 6 f6a04280 1 dm-6", "", " Display queued I/O information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -q", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror (reads) (writes) (quiesced) (recovered)", "", " --------------------------------------------------------------", " \"reads/writes\" are members of the struct mirror_set, and", " \"quiesced/recovered\" are members of the struct region_hash.", " If the list is empty, the member is bracketed by \"()\".", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " 
TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_size:0", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear No queue info", "", " Display status information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -s", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", " PG PG_STATUS NR_PATHS PATHS", " 1 active 2 8:16(A,0) 8:32(A,0)", "", " --------------------------------------------------------------", " Format of \"PATHS\": :(,)", " Status: A:active, F:faulty", " Fail_count: the value of the struct pgpath.fail_count", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror in_sync:1 dev:8:16(A,0),8:32(A,0)", "", " --------------------------------------------------------------", " Format of \"dev\": :(,)", " Status: A:active, D:degraded", " Error_count: the value of the struct mirror.error_count", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear No status info", "", " Display table information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -t", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " TARGET TARGET_TYPE PRIVATE_DATA", " f89b4080 snapshot orig:253:6 cow:253:7 type:P chunk_size:16", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " TARGET TARGET_TYPE PRIVATE_DATA", " f890f080 linear begin:0 len:204800 dev:8:5 offset:384", "", " MAJ MIN MAP_DEV 
DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", " PG PATH_SELECTOR NR_PATHS PATHS", " 1 round-robin 2 8:16 8:32", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror log:core dev:8:16(0),8:32(0)", "", " --------------------------------------------------------------", " Format of \"dev\": :()", " Offset: the value of the struct mirror.offset", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " TARGET TARGET_TYPE PRIVATE_DATA", " f899d080 linear begin:0 len:8192 dev:8:5 offset:205184", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 2 f7c53180 c4866180 1 vg0-lv0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8bbc080 snapshot-origin orig_dev:253:6", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear begin:0 len:2040192 dev:253:0 offset:63", NULL }; /* * Registering command extension */ static struct command_table_entry command_table[] = { {"dminfo", cmd_dminfo, help_dminfo, 0}, {NULL, NULL, NULL, 0}, }; void __attribute__((constructor)) dminfo_init(void) { register_extension(command_table); dminfo_register_target_analyzer(&zero_analyzer); dminfo_register_target_analyzer(&error_analyzer); dminfo_register_target_analyzer(&linear_analyzer); dminfo_register_target_analyzer(&mirror_analyzer); dminfo_register_target_analyzer(&multipath_analyzer); dminfo_register_target_analyzer(&crypt_analyzer); dminfo_register_target_analyzer(&stripe_analyzer); dminfo_register_target_analyzer(&snapshot_analyzer); dminfo_register_target_analyzer(&snapshot_origin_analyzer); } void __attribute__((destructor)) dminfo_fini(void) { } 
crash-7.1.4/extensions/trace.c0000664000000000000000000014267012634305150014775 0ustar rootroot/* * trace extension module for crash * * Copyright (C) 2009, 2010 FUJITSU LIMITED * Author: Lai Jiangshan * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. */ #define _GNU_SOURCE #include "defs.h" #include #include #include #include static int verbose = 0; static int nr_cpu_ids; /* * lockless ring_buffer and old non-lockless ring_buffer are both supported. */ static int lockless_ring_buffer; static int per_cpu_buffer_sizes; /* * global and encapsulated current_trace are both supported */ static int encapsulated_current_trace; /* * trace_buffer is supported */ static int trace_buffer_available; /* * max_buffer is supported */ static int max_buffer_available; #define koffset(struct, member) struct##_##member##_offset static int koffset(trace_array, current_trace); static int koffset(trace_array, trace_buffer); static int koffset(trace_array, max_buffer); static int koffset(trace_buffer, buffer); static int koffset(trace_array, buffer); static int koffset(tracer, name); static int koffset(ring_buffer, pages); static int koffset(ring_buffer, flags); static int koffset(ring_buffer, cpus); static int koffset(ring_buffer, buffers); static int koffset(ring_buffer_per_cpu, cpu); static int koffset(ring_buffer_per_cpu, pages); static int koffset(ring_buffer_per_cpu, nr_pages); static int koffset(ring_buffer_per_cpu, head_page); static int koffset(ring_buffer_per_cpu, tail_page); static int koffset(ring_buffer_per_cpu, commit_page); static int koffset(ring_buffer_per_cpu, reader_page); static int koffset(ring_buffer_per_cpu, overrun); static int koffset(ring_buffer_per_cpu, entries); static int koffset(buffer_page, read); static int koffset(buffer_page, list); static int koffset(buffer_page, page); 
/*
 * Write exactly "size" bytes from "data" to "fd", retrying after short
 * writes.  Returns 0 on success, -1 on any write error.
 *
 * Fixes vs. original: (1) the retry restarted from the beginning of
 * "data" instead of the unwritten tail, corrupting output whenever
 * write(2) returned short; (2) write(2)'s ssize_t result was stored in
 * a size_t, so the "w <= 0" error check could never see a -1 return.
 */
static int
write_and_check(int fd, void *data, size_t size)
{
	size_t tot = 0;
	ssize_t w;

	while (tot < size) {
		w = write(fd, (char *)data + tot, size - tot);
		if (w <= 0)
			return -1;
		tot += (size_t)w;
	}

	return 0;
}
member:" #member); \ fprintf(fp, "\n"); \ return -1; \ } \ } while (0) if (encapsulated_current_trace) init_offset(trace_array, current_trace); if (trace_buffer_available) { init_offset(trace_array, trace_buffer); init_offset(trace_buffer, buffer); if (max_buffer_available) init_offset(trace_array, max_buffer); } else { init_offset(trace_array, buffer); } init_offset(tracer, name); if (MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages")) { per_cpu_buffer_sizes = 1; if (verbose) fprintf(fp, "per cpu buffer sizes\n"); } if (kernel_symbol_exists("ring_buffer_read")) gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read"); if (!per_cpu_buffer_sizes) init_offset(ring_buffer, pages); init_offset(ring_buffer, flags); init_offset(ring_buffer, cpus); init_offset(ring_buffer, buffers); if (MEMBER_SIZE("ring_buffer_per_cpu", "pages") == sizeof(ulong)) { lockless_ring_buffer = 1; if (verbose) fprintf(fp, "lockless\n"); } if (per_cpu_buffer_sizes) init_offset(ring_buffer_per_cpu, nr_pages); init_offset(ring_buffer_per_cpu, cpu); init_offset(ring_buffer_per_cpu, pages); init_offset(ring_buffer_per_cpu, head_page); init_offset(ring_buffer_per_cpu, tail_page); init_offset(ring_buffer_per_cpu, commit_page); init_offset(ring_buffer_per_cpu, reader_page); init_offset(ring_buffer_per_cpu, overrun); init_offset(ring_buffer_per_cpu, entries); init_offset(buffer_page, read); init_offset(buffer_page, list); init_offset(buffer_page, page); init_offset(list_head, next); koffset(ftrace_event_call, list) = MAX(MEMBER_OFFSET("ftrace_event_call", "list"), MEMBER_OFFSET("trace_event_call", "list")); if (koffset(ftrace_event_call, list) < 0) { fprintf(fp, "failed to init the offset, struct:[f]trace_event_call member:list)\n"); return -1; \ } init_offset(ftrace_event_field, link); init_offset(ftrace_event_field, name); init_offset(ftrace_event_field, type); init_offset(ftrace_event_field, offset); init_offset(ftrace_event_field, size); init_offset(ftrace_event_field, is_signed); 
return 0; #undef init_offset } static void print_offsets(void) { if (!verbose) return; #define print_offset(struct, member) fprintf(fp, \ "koffset(" #struct ", " #member ") = %d\n", koffset(struct, member)) print_offset(trace_array, buffer); print_offset(tracer, name); print_offset(ring_buffer, pages); print_offset(ring_buffer, flags); print_offset(ring_buffer, cpus); print_offset(ring_buffer, buffers); print_offset(ring_buffer_per_cpu, cpu); print_offset(ring_buffer_per_cpu, pages); print_offset(ring_buffer_per_cpu, head_page); print_offset(ring_buffer_per_cpu, tail_page); print_offset(ring_buffer_per_cpu, commit_page); print_offset(ring_buffer_per_cpu, reader_page); print_offset(ring_buffer_per_cpu, overrun); print_offset(ring_buffer_per_cpu, entries); print_offset(buffer_page, read); print_offset(buffer_page, list); print_offset(buffer_page, page); print_offset(list_head, next); print_offset(ftrace_event_call, list); print_offset(ftrace_event_field, link); print_offset(ftrace_event_field, name); print_offset(ftrace_event_field, type); print_offset(ftrace_event_field, offset); print_offset(ftrace_event_field, size); print_offset(ftrace_event_field, is_signed); #undef print_offset } static int ftrace_init_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) { unsigned j = 0, count = 0; ulong head, page; ulong real_head_page = cpu_buffer->head_page; cpu_buffer->pages = calloc(sizeof(ulong), nr_pages); if (cpu_buffer->pages == NULL) return -1; cpu_buffer->linear_pages = calloc(sizeof(ulong), nr_pages + 1); if (cpu_buffer->linear_pages == NULL) { return -1; } if (lockless_ring_buffer) { read_value(head, cpu_buffer->kaddr, ring_buffer_per_cpu, pages); cpu_buffer->pages[j++] = head - koffset(buffer_page, list); } else head = cpu_buffer->kaddr + koffset(ring_buffer_per_cpu, pages); page = head; for (;;) { read_value(page, page, list_head, next); if (page & 3) { /* lockless_ring_buffer */ page &= ~3; real_head_page = page - koffset(buffer_page, list); } if (j 
== nr_pages) break; if (page == head) { error(INFO, "Num of pages is less than %d\n", nr_pages); goto out_fail; } cpu_buffer->pages[j++] = page - koffset(buffer_page, list); } if (page != head) { error(INFO, "Num of pages is larger than %d\n", nr_pages); goto out_fail; } /* find head page and head_page_index */ cpu_buffer->real_head_page = real_head_page; cpu_buffer->head_page_index = -1; for (j = 0; j < nr_pages; j++) { if (cpu_buffer->pages[j] == real_head_page) { cpu_buffer->head_page_index = j; break; } } if (cpu_buffer->head_page_index == -1) { error(INFO, "error for resolve head_page_index\n"); goto out_fail; } /* Setup linear pages */ cpu_buffer->linear_pages[count++] = cpu_buffer->reader_page; if (cpu_buffer->reader_page == cpu_buffer->commit_page) goto done; j = cpu_buffer->head_page_index; for (;;) { cpu_buffer->linear_pages[count++] = cpu_buffer->pages[j]; if (cpu_buffer->pages[j] == cpu_buffer->commit_page) break; j++; if (j == nr_pages) j = 0; if (j == cpu_buffer->head_page_index) { /* cpu_buffer->commit_page may be corrupted */ break; } } done: cpu_buffer->nr_linear_pages = count; return 0; out_fail: return -1; } static void ftrace_destroy_buffers(struct ring_buffer_per_cpu *buffers) { int i; for (i = 0; i < nr_cpu_ids; i++) { if (!buffers[i].kaddr) continue; free(buffers[i].pages); free(buffers[i].linear_pages); } } static int ftrace_init_buffers(struct ring_buffer_per_cpu *buffers, ulong ring_buffer, unsigned pages) { int i; ulong buffers_array; read_value(buffers_array, ring_buffer, ring_buffer, buffers); for (i = 0; i < nr_cpu_ids; i++) { if (!readmem(buffers_array + sizeof(ulong) * i, KVADDR, &buffers[i].kaddr, sizeof(ulong), "ring_buffer's cpu buffer", RETURN_ON_ERROR)) goto out_fail; if (!buffers[i].kaddr) continue; #define buffer_read_value(member) read_value(buffers[i].member, \ buffers[i].kaddr, ring_buffer_per_cpu, member) buffer_read_value(head_page); buffer_read_value(tail_page); buffer_read_value(commit_page); 
buffer_read_value(reader_page); buffer_read_value(overrun); buffer_read_value(entries); if (per_cpu_buffer_sizes) { buffer_read_value(nr_pages); pages = buffers[i].nr_pages; } else buffers[i].nr_pages = pages; #undef buffer_read_value if (ftrace_init_pages(buffers + i, pages) < 0) goto out_fail; if (verbose) { fprintf(fp, "overrun=%lu\n", buffers[i].overrun); fprintf(fp, "entries=%lu\n", buffers[i].entries); } } return 0; out_fail: ftrace_destroy_buffers(buffers); return -1; } static int ftrace_int_global_trace(void) { if (trace_buffer_available) { global_trace_buffer = global_trace + koffset(trace_array, trace_buffer); read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer); } else { read_value(global_ring_buffer, global_trace, trace_array, buffer); read_value(global_pages, global_ring_buffer, ring_buffer, pages); } global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids); if (global_buffers == NULL) goto out_fail; if (ftrace_init_buffers(global_buffers, global_ring_buffer, global_pages) < 0) goto out_fail; return 0; out_fail: free(global_buffers); return -1; } static int ftrace_int_max_tr_trace(void) { if (trace_buffer_available) { if (!max_buffer_available) return 0; global_max_buffer = global_trace + koffset(trace_array, max_buffer); read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer); } else { read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer); if (!max_tr_ring_buffer) return 0; read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages); } max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids); if (max_tr_buffers == NULL) goto out_fail; if (ftrace_init_buffers(max_tr_buffers, max_tr_ring_buffer, max_tr_pages) < 0) goto out_fail; return 0; out_fail: free(max_tr_buffers); max_tr_ring_buffer = 0; return -1; } static int ftrace_init_current_tracer(void) { ulong addr; char tmp[128]; /* Get current tracer name */ if (encapsulated_current_trace) { read_value(addr, global_trace, trace_array, 
/*
 * One-shot initialization of the trace extension: resolve the kernel's
 * ftrace symbols, detect which of several kernel-version layouts is in
 * use, then build the ring-buffer, event-type and tracer state in
 * order.  Returns 0 on success, -1 on failure (the command is then
 * never registered).  Cleanup unwinds in reverse order via gotos.
 */
static int
ftrace_init(void)
{
	struct syment *sym_global_trace;
	struct syment *sym_max_tr_trace;
	struct syment *sym_ftrace_events;
	struct syment *sym_current_trace;

	/* both symbols are mandatory; without them there is no ftrace */
	sym_global_trace = symbol_search("global_trace");
	sym_ftrace_events = symbol_search("ftrace_events");
	if (sym_global_trace == NULL || sym_ftrace_events == NULL)
		return -1;
	global_trace = sym_global_trace->value;
	ftrace_events = sym_ftrace_events->value;

	/* newer kernels embed current_trace inside trace_array */
	if (MEMBER_EXISTS("trace_array", "current_trace")) {
		encapsulated_current_trace = 1;
	} else {
		sym_current_trace = symbol_search("current_trace");
		if (sym_current_trace == NULL)
			return -1;
		current_trace = sym_current_trace->value;
	}

	/* newer kernels use trace_array.trace_buffer / max_buffer */
	if (MEMBER_EXISTS("trace_array", "trace_buffer")) {
		trace_buffer_available = 1;
		if (MEMBER_EXISTS("trace_array", "max_buffer"))
			max_buffer_available = 1;
	} else {
		sym_max_tr_trace = symbol_search("max_tr");
		if (sym_max_tr_trace == NULL)
			return -1;
		max_tr_trace = sym_max_tr_trace->value;
	}

	/* fall back to a single cpu if nr_cpu_ids cannot be read */
	if (!try_get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids))
		nr_cpu_ids = 1;

	if (init_offsets() < 0)
		return -1;
	print_offsets();

	if (ftrace_int_global_trace() < 0)
		goto out_0;

	/* max_tr buffer is optional; failure here is not fatal */
	ftrace_int_max_tr_trace();

	if (ftrace_init_event_types() < 0)
		goto out_1;

	if (ftrace_init_current_tracer() < 0)
		goto out_2;

	return 0;

out_2:
	ftrace_destroy_event_types();
out_1:
	if (max_tr_ring_buffer) {
		ftrace_destroy_buffers(max_tr_buffers);
		free(max_tr_buffers);
	}
	ftrace_destroy_buffers(global_buffers);
	free(global_buffers);
out_0:
	return -1;
}

/*
 * Tear down everything ftrace_init() built, in the same order as its
 * error-unwind path.  (free() here is the file-local macro that casts
 * away const for current_tracer_name.)
 */
static void
ftrace_destroy(void)
{
	free(current_tracer_name);
	ftrace_destroy_event_types();

	if (max_tr_ring_buffer) {
		ftrace_destroy_buffers(max_tr_buffers);
		free(max_tr_buffers);
	}

	ftrace_destroy_buffers(global_buffers);
	free(global_buffers);
}
ftrace_dump_page(int fd, ulong page, void *page_tmp) { ulong raw_page; read_value(raw_page, page, buffer_page, page); if (!readmem(raw_page, KVADDR, page_tmp, PAGESIZE(), "get page context", RETURN_ON_ERROR)) goto out_fail; if (write_and_check(fd, page_tmp, PAGESIZE())) return -1; return 0; out_fail: return -1; } static void ftrace_dump_buffer(int fd, struct ring_buffer_per_cpu *cpu_buffer, unsigned pages, void *page_tmp) { int i; for (i = 0; i < cpu_buffer->nr_linear_pages; i++) { if (ftrace_dump_page(fd, cpu_buffer->linear_pages[i], page_tmp) < 0) break; } } static int try_mkdir(const char *pathname, mode_t mode) { int ret; ret = mkdir(pathname, mode); if (ret < 0) { if (errno == EEXIST) return 0; error(INFO, "mkdir failed\n"); return -1; } return 0; } static int ftrace_dump_buffers(const char *per_cpu_path) { int i; void *page_tmp; char path[PATH_MAX]; int fd; page_tmp = malloc(PAGESIZE()); if (page_tmp == NULL) return -1; for (i = 0; i < nr_cpu_ids; i++) { struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; if (!cpu_buffer->kaddr) continue; snprintf(path, sizeof(path), "%s/cpu%d", per_cpu_path, i); if (try_mkdir(path, 0755) < 0) goto out_fail; snprintf(path, sizeof(path), "%s/cpu%d/trace_pipe_raw", per_cpu_path, i); fd = open(path, O_WRONLY | O_CREAT, 0644); if (fd < 0) goto out_fail; ftrace_dump_buffer(fd, cpu_buffer, global_pages, page_tmp); close(fd); } free(page_tmp); return 0; out_fail: free(page_tmp); return -1; } #define MAX_CACHE_ID 256 struct ftrace_field { const char *name; const char *type; int offset; int size; int is_signed; }; struct event_type { struct event_type *next; const char *system; const char *name; int plugin; const char *print_fmt; int id; int nfields; struct ftrace_field *fields; }; static struct event_type *event_type_cache[MAX_CACHE_ID]; static struct event_type **event_types; static int nr_event_types; static struct ftrace_field *ftrace_common_fields; static int ftrace_common_fields_count; static int 
/*
 * Resolve the head of the field list for an event "call".  Cached
 * static state distinguishes three layouts:
 *   inited == 1: old kernels — fields list embedded in the event call.
 *   inited == 2: newer kernels — fields hang off the event class, and
 *                syscall events compute theirs via a get_fields hook,
 *                which we recognize by comparing function addresses.
 * Returns 0 and sets *fields on success, -1 on failure.
 *
 * Fix vs. original: corrected the "Unkown" typo in the error message.
 */
static int
ftrace_get_event_type_fields(ulong call, ulong *fields)
{
	static int inited;
	static int fields_offset;
	static int class_offset;
	static int get_fields_offset;
	static ulong syscall_get_enter_fields_value;
	static ulong syscall_get_exit_fields_value;
	struct syment *sp;
	ulong class, get_fields;

	if (inited)
		goto work;

	inited = 1;
	fields_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "fields"),
			MEMBER_OFFSET("trace_event_call", "fields"));
	class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"),
			MEMBER_OFFSET("trace_event_call", "class"));
	if (class_offset < 0)
		goto work;

	/* class member exists: switch to the class-based layout */
	inited = 2;
	fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "fields"),
			MEMBER_OFFSET("trace_event_class", "fields"));
	if (fields_offset < 0)
		return -1;

	get_fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "get_fields"),
			MEMBER_OFFSET("trace_event_class", "get_fields"));
	if ((sp = symbol_search("syscall_get_enter_fields")) != NULL)
		syscall_get_enter_fields_value = sp->value;
	if ((sp = symbol_search("syscall_get_exit_fields")) != NULL)
		syscall_get_exit_fields_value = sp->value;

work:
	if (fields_offset < 0)
		return -1;

	if (inited == 1) {
		/* old layout: list lives directly in the event call */
		*fields = call + fields_offset;
		return 0;
	}

	if (!readmem(call + class_offset, KVADDR, &class, sizeof(class),
			"read ftrace_event_call class", RETURN_ON_ERROR))
		return -1;
	if (!readmem(class + get_fields_offset, KVADDR, &get_fields,
			sizeof(get_fields), "read ftrace_event_call get_fields",
			RETURN_ON_ERROR))
		return -1;

	if (!get_fields) {
		*fields = class + fields_offset;
		return 0;
	}

	/* syscall events compute their field list through a hook */
	if (get_fields == syscall_get_enter_fields_value)
		return syscall_get_enter_fields(call, fields);
	if (get_fields == syscall_get_exit_fields_value)
		return syscall_get_exit_fields(call, fields);

	fprintf(fp, "Unknown get_fields function\n");
	return -1;
}
name_addr, type_addr; char field_name[128], field_type[128]; int offset, size, is_signed; field = pos - koffset(ftrace_event_field, link); /* Read a field from the core */ read_value(name_addr, field, ftrace_event_field, name); read_value(type_addr, field, ftrace_event_field, type); read_value(offset, field, ftrace_event_field, offset); read_value(size, field, ftrace_event_field, size); read_value(is_signed, field, ftrace_event_field, is_signed); if (!read_string(name_addr, field_name, 128)) goto out_fail; if (!read_string(type_addr, field_type, 128)) goto out_fail; /* Enlarge fields array when need */ if (nfields >= max_fields) { void *tmp; max_fields = nfields * 2; tmp = realloc(fields, sizeof(*fields) * max_fields); if (tmp == NULL) goto out_fail; fields = tmp; } /* Set up and Add a field */ fields[nfields].offset = offset; fields[nfields].size = size; fields[nfields].is_signed = is_signed; fields[nfields].name = strdup(field_name); if (fields[nfields].name == NULL) goto out_fail; fields[nfields].type = strdup(field_type); if (fields[nfields].type == NULL) { free(fields[nfields].name); goto out_fail; } nfields++; /* Advance to the next field */ read_value(pos, pos, list_head, next); } *pnfields = nfields; *pfields = fields; return 0; out_fail: for (nfields--; nfields >= 0; nfields--) { free(fields[nfields].name); free(fields[nfields].type); } free(fields); return -1; } static int ftrace_init_event_type(ulong call, struct event_type *aevent_type) { ulong fields_head = 0; if (ftrace_get_event_type_fields(call, &fields_head) < 0) return -1; return ftrace_init_event_fields(fields_head, &aevent_type->nfields, &aevent_type->fields); } static int ftrace_init_common_fields(void) { ulong ftrace_common_fields_head; struct syment *sp; sp = symbol_search("ftrace_common_fields"); if (!sp) return 0; ftrace_common_fields_head = sp->value; return ftrace_init_event_fields(ftrace_common_fields_head, &ftrace_common_fields_count, &ftrace_common_fields); } static void 
/* event call flag: the name pointer refers to a struct tracepoint */
#define TRACE_EVENT_FL_TRACEPOINT 0x40

/*
 * Read an event's name into "name" (at most "len" bytes).  Cached
 * static state distinguishes two layouts:
 *   inited == 1: "name" is a plain char pointer member.
 *   inited == 2: "name" is an anonymous union member; when the
 *                TRACEPOINT flag is set it points at a struct
 *                tracepoint whose own name member must be followed.
 * Returns 0 on success, -1 on failure.
 */
static int
ftrace_get_event_type_name(ulong call, char *name, int len)
{
	static int inited;
	static int name_offset;
	static int flags_offset;
	static int tp_name_offset;
	uint flags;
	ulong name_addr;

	if (inited)
		goto work;

	inited = 1;
	name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
			MEMBER_OFFSET("trace_event_call", "name"));
	if (name_offset >= 0)
		goto work;

	/* name not found directly: look inside the anonymous union */
	name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"),
			ANON_MEMBER_OFFSET("trace_event_call", "name"));
	if (name_offset < 0)
		return -1;

	flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
			MEMBER_OFFSET("trace_event_call", "flags"));
	if (flags_offset < 0)
		return -1;

	tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
	if (tp_name_offset < 0)
		return -1;

	inited = 2;
work:
	if (name_offset < 0)
		return -1;

	if (!readmem(call + name_addr ? 0 : 0, KVADDR, &name_addr, sizeof(name_addr),
			"read ftrace_event_call name_addr", RETURN_ON_ERROR))
		return -1;

	if (inited == 2) {
		if (!readmem(call + flags_offset, KVADDR, &flags,
				sizeof(flags), "read ftrace_event_call flags",
				RETURN_ON_ERROR))
			return -1;
		/* union holds a tracepoint pointer; chase its name */
		if (flags & TRACE_EVENT_FL_TRACEPOINT) {
			if (!readmem(name_addr + tp_name_offset, KVADDR,
					&name_addr, sizeof(name_addr),
					"read tracepoint name", RETURN_ON_ERROR))
				return -1;
		}
	}

	if (!read_string(name_addr, name, len))
		return -1;
	return 0;
}
MAX(MEMBER_OFFSET("ftrace_event_call", "system"), MEMBER_OFFSET("trace_event_call", "system")); if (sys_offset >= 0) goto work; class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"), MEMBER_OFFSET("trace_event_call", "class")); if (class_offset < 0) return -1; sys_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "system"), MEMBER_OFFSET("trace_event_class", "system")); inited = 2; work: if (sys_offset < 0) return -1; if (inited == 2 && !readmem(call + class_offset, KVADDR, &ptr, sizeof(ptr), "read ftrace_event_call class_addr", RETURN_ON_ERROR)) return -1; if (!readmem(ptr + sys_offset, KVADDR, &sys_addr, sizeof(sys_addr), "read ftrace_event_call sys_addr", RETURN_ON_ERROR)) return -1; if (!read_string(sys_addr, system, len)) return -1; return 0; } static int read_long_string(ulong kvaddr, char **buf) { char strbuf[MIN_PAGE_SIZE], *ret_buf = NULL; ulong kp; int cnt1, cnt2, size; again: kp = kvaddr; size = 0; for (;;) { cnt1 = MIN_PAGE_SIZE - (kp & (MIN_PAGE_SIZE-1)); if (!readmem(kp, KVADDR, strbuf, cnt1, "readstring characters", QUIET|RETURN_ON_ERROR)) return -1; cnt2 = strnlen(strbuf, cnt1); if (ret_buf) memcpy(ret_buf + size, strbuf, cnt2); kp += cnt2; size += cnt2; if (cnt2 < cnt1) { if (ret_buf) { break; } else { ret_buf = malloc(size + 1); if (!ret_buf) return -1; goto again; } } } ret_buf[size] = '\0'; *buf = ret_buf; return size; } static int ftrace_get_event_type_print_fmt(ulong call, char **print_fmt) { static int inited; static int fmt_offset; ulong fmt_addr; if (!inited) { inited = 1; fmt_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "print_fmt"), MEMBER_OFFSET("trace_event_call", "print_fmt")); } if (fmt_offset < 0) { *print_fmt = strdup("Unknown print_fmt"); return 0; } if (!readmem(call + fmt_offset, KVADDR, &fmt_addr, sizeof(fmt_addr), "read ftrace_event_call fmt_addr", RETURN_ON_ERROR)) return -1; return read_long_string(fmt_addr, print_fmt); } static int ftrace_get_event_type_id(ulong call, int *id) { static int inited; static int 
/*
 * Walk the kernel's ftrace_events list and build the in-memory table
 * of event types (id, name, system, print format and field layout).
 * Populates the global event_types array, nr_event_types counter and
 * the id-indexed event_type_cache.  Returns 0 on success; on failure
 * everything built so far is torn down and -1 is returned.
 */
static int
ftrace_init_event_types(void)
{
	ulong event;
	struct event_type *aevent_type;
	int max_types = 128;

	event_types = malloc(sizeof(*event_types) * max_types);
	if (event_types == NULL)
		return -1;

	read_value(event, ftrace_events, list_head, next);
	while (event != ftrace_events) {
		ulong call;
		char name[128], system[128], *print_fmt;
		int id;

		/* list node is embedded in the event call; back up to it */
		call = event - koffset(ftrace_event_call, list);

		/* Read a event type from the core */
		if (ftrace_get_event_type_id(call, &id) < 0 ||
		    ftrace_get_event_type_name(call, name, 128) < 0 ||
		    ftrace_get_event_type_system(call, system, 128) < 0 ||
		    ftrace_get_event_type_print_fmt(call, &print_fmt) < 0)
			goto out_fail;

		/* Enlarge event types array when need */
		if (nr_event_types >= max_types) {
			void *tmp;
			max_types = 2 * nr_event_types;
			tmp = realloc(event_types,
					sizeof(*event_types) * max_types);
			if (tmp == NULL) {
				free(print_fmt);
				goto out_fail;
			}
			event_types = tmp;
		}

		/* Create a event type */
		aevent_type = malloc(sizeof(*aevent_type));
		if (aevent_type == NULL) {
			free(print_fmt);
			goto out_fail;
		}
		/* print_fmt ownership passes to aevent_type from here on */
		aevent_type->system = strdup(system);
		aevent_type->name = strdup(name);
		aevent_type->print_fmt = print_fmt;
		aevent_type->id = id;
		aevent_type->nfields = 0;
		aevent_type->fields = NULL;
		if (aevent_type->system == NULL || aevent_type->name == NULL)
			goto out_fail_free_aevent_type;

		if (ftrace_init_event_type(call, aevent_type) < 0)
			goto out_fail_free_aevent_type;

		/* events in the "ftrace" system are internal plugins */
		if (!strcmp("ftrace", aevent_type->system))
			aevent_type->plugin = 1;
		else
			aevent_type->plugin = 0;

		/* Add a event type */
		event_types[nr_event_types++] = aevent_type;
		if ((unsigned)id < MAX_CACHE_ID)
			event_type_cache[id] = aevent_type;

		/* Advance to the next event type */
		read_value(event, event, list_head, next);
	}

	if (ftrace_init_common_fields() < 0)
		goto out_fail;
	return 0;

out_fail_free_aevent_type:
	free(aevent_type->system);
	free(aevent_type->name);
	free(aevent_type->print_fmt);
	free(aevent_type);
out_fail:
	/* frees every event type already added, plus the array itself */
	ftrace_destroy_event_types();
	return -1;
}
* Normal: * field:TYPE VAR * If TYPE := TYPE[LEN], it is shown: * field:TYPE VAR[LEN] */ struct ftrace_field *field = &fields[i]; const char *array_descriptor = strchr(field->type, '['); if (!strncmp(field->type, "__data_loc", 10)) array_descriptor = NULL; if (!array_descriptor) { fprintf(out, "\tfield:%s %s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", field->type, field->name, field->offset, field->size, !!field->is_signed); } else { fprintf(out, "\tfield:%.*s %s%s;\toffset:%u;" "\tsize:%u;\tsigned:%d;\n", (int)(array_descriptor - field->type), field->type, field->name, array_descriptor, field->offset, field->size, !!field->is_signed); } } if (!printed_common_field) { fprintf(out, "\n"); if (ftrace_common_fields_count) nfields = t->nfields; else nfields = t->nfields - default_common_field_count; fields = t->fields; printed_common_field = 1; goto again; } fprintf(out, "\nprint fmt: %s\n", t->print_fmt); fclose(out); return 0; } static int ftrace_dump_event_types(const char *events_path) { int i; for (i = 0; i < nr_event_types; i++) { char path[PATH_MAX]; struct event_type *t = event_types[i]; snprintf(path, sizeof(path), "%s/%s", events_path, t->system); if (try_mkdir(path, 0755) < 0) return -1; snprintf(path, sizeof(path), "%s/%s/%s", events_path, t->system, t->name); if (try_mkdir(path, 0755) < 0) return -1; if (ftrace_dump_event_type(t, path) < 0) return -1; } return 0; } static void show_basic_info(void) { fprintf(fp, "current tracer is %s\n", current_tracer_name); } static int dump_saved_cmdlines(const char *dump_tracing_dir) { char path[PATH_MAX]; FILE *out; int i; struct task_context *tc; snprintf(path, sizeof(path), "%s/saved_cmdlines", dump_tracing_dir); out = fopen(path, "w"); if (out == NULL) return -1; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++) fprintf(out, "%d %s\n", (int)tc[i].pid, tc[i].comm); fclose(out); return 0; } static int dump_kallsyms(const char *dump_tracing_dir) { char path[PATH_MAX]; FILE *out; int i; struct syment *sp; 
snprintf(path, sizeof(path), "%s/kallsyms", dump_tracing_dir); out = fopen(path, "w"); if (out == NULL) return -1; for (sp = st->symtable; sp < st->symend; sp++) fprintf(out, "%lx %c %s\n", sp->value, sp->type, sp->name); for (i = 0; i < st->mods_installed; i++) { struct load_module *lm = &st->load_modules[i]; for (sp = lm->mod_symtable; sp <= lm->mod_symend; sp++) { if (!strncmp(sp->name, "_MODULE_", strlen("_MODULE_"))) continue; fprintf(out, "%lx %c %s\t[%s]\n", sp->value, sp->type, sp->name, lm->mod_name); } } fclose(out); return 0; } static int trace_cmd_data_output(int fd); static void ftrace_dump(int argc, char *argv[]) { int c; int dump_meta_data = 0; int dump_symbols = 0; char *dump_tracing_dir; char path[PATH_MAX]; int ret; while ((c = getopt(argc, argv, "smt")) != EOF) { switch(c) { case 's': dump_symbols = 1; break; case 'm': dump_meta_data = 1; break; case 't': if (dump_symbols || dump_meta_data || argc - optind > 1) cmd_usage(pc->curcmd, SYNOPSIS); else { char *trace_dat = "trace.dat"; int fd; if (argc - optind == 0) trace_dat = "trace.dat"; else if (argc - optind == 1) trace_dat = argv[optind]; fd = open(trace_dat, O_WRONLY | O_CREAT | O_TRUNC, 0644); trace_cmd_data_output(fd); close(fd); } return; default: cmd_usage(pc->curcmd, SYNOPSIS); return; } } if (argc - optind == 0) { dump_tracing_dir = "dump_tracing_dir"; } else if (argc - optind == 1) { dump_tracing_dir = argv[optind]; } else { cmd_usage(pc->curcmd, SYNOPSIS); return; } ret = mkdir(dump_tracing_dir, 0755); if (ret < 0) { if (errno == EEXIST) error(INFO, "mkdir: %s exists\n", dump_tracing_dir); return; } snprintf(path, sizeof(path), "%s/per_cpu", dump_tracing_dir); if (try_mkdir(path, 0755) < 0) return; if (ftrace_dump_buffers(path) < 0) return; if (dump_meta_data) { /* Dump event types */ snprintf(path, sizeof(path), "%s/events", dump_tracing_dir); if (try_mkdir(path, 0755) < 0) return; if (ftrace_dump_event_types(path) < 0) return; /* Dump pids with corresponding cmdlines */ if 
(dump_saved_cmdlines(dump_tracing_dir) < 0) return; } if (dump_symbols) { /* Dump all symbols of the kernel */ dump_kallsyms(dump_tracing_dir); } } static void ftrace_show(int argc, char *argv[]) { char buf[4096]; char tmp[] = "/tmp/crash.trace_dat.XXXXXX"; char *trace_cmd = "trace-cmd", *env_trace_cmd = getenv("TRACE_CMD"); int fd; FILE *file; size_t ret; size_t nitems __attribute__ ((__unused__)); /* check trace-cmd */ if (env_trace_cmd) trace_cmd = env_trace_cmd; buf[0] = 0; if ((file = popen(trace_cmd, "r"))) { ret = fread(buf, 1, sizeof(buf), file); buf[ret] = 0; pclose(file); } if (!strstr(buf, "trace-cmd version")) { if (env_trace_cmd) fprintf(fp, "Invalid environment TRACE_CMD: %s\n", env_trace_cmd); else fprintf(fp, "\"trace show\" requires trace-cmd.\n" "please set the environment TRACE_CMD " "if you installed it in a special path\n" ); return; } /* dump trace.dat to the temp file */ fd = mkstemp(tmp); if (fd < 0) return; if (trace_cmd_data_output(fd) < 0) goto out; /* splice the output of trace-cmd to user */ snprintf(buf, sizeof(buf), "%s report %s", trace_cmd, tmp); if (!(file = popen(buf, "r"))) goto out; for (;;) { ret = fread(buf, 1, sizeof(buf), file); if (ret == 0) break; nitems = fwrite(buf, 1, ret, fp); } pclose(file); out: close(fd); unlink(tmp); return; } static void cmd_ftrace(void) { if (argcnt == 1) show_basic_info(); else if (!strcmp(args[1], "dump")) ftrace_dump(argcnt - 1, args + 1); else if (!strcmp(args[1], "show")) ftrace_show(argcnt - 1, args + 1); else if (!strcmp(args[1], "report")) ftrace_show(argcnt - 1, args + 1); else cmd_usage(pc->curcmd, SYNOPSIS); } static char *help_ftrace[] = { "trace", "show or dump the tracing info", "[ ] [-f [no]]> | > ]", "trace", " shows the current tracer and other informations.", "", "trace show", " shows all events with readability text(sorted by timestamp)", "", "trace report", " the same as \"trace show\"", "", "trace dump [-sm] ", " dump ring_buffers to dest-dir. 
Then you can parse it", " by other tracing tools. The dirs and files are generated", " the same as debugfs/tracing.", " -m: also dump metadata of ftrace.", " -s: also dump symbols of the kernel.", "trace dump -t [output-file-name]", " dump ring_buffers and all meta data to a file that can", " be parsed by trace-cmd. Default output file name is \"trace.dat\".", NULL }; static struct command_table_entry command_table[] = { { "trace", cmd_ftrace, help_ftrace, 0 }, { NULL, 0, 0, 0 } }; static int ftrace_initialized; void __attribute__((constructor)) trace_init(void) { if (ftrace_init() < 0) return; ftrace_initialized = 1; register_extension(command_table); } void __attribute__((destructor)) trace_fini(void) { if (ftrace_initialized) ftrace_destroy(); } #define TRACE_CMD_FILE_VERSION_STRING "6" static inline int host_bigendian(void) { unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 }; unsigned int *ptr; ptr = (unsigned int *)str; return *ptr == 0x01020304; } static char *tmp_file_buf; static unsigned long long tmp_file_pos; static unsigned long long tmp_file_size; static int tmp_file_error; static int init_tmp_file(void) { tmp_file_buf = malloc(4096); if (tmp_file_buf == NULL) return -1; tmp_file_pos = 0; tmp_file_size = 4096; tmp_file_error = 0; return 0; } static void destory_tmp_file(void) { free(tmp_file_buf); } #define tmp_fprintf(fmt...) 
\ do { \ char *__buf = tmp_file_buf; \ unsigned long long __pos; \ \ if (tmp_file_error) \ break; \ __pos = tmp_file_pos; \ __pos += snprintf(__buf + __pos, tmp_file_size - __pos, fmt); \ if (__pos >= tmp_file_size) { \ tmp_file_size = __pos + tmp_file_size; \ __buf = realloc(__buf, tmp_file_size); \ if (!__buf) { \ tmp_file_error = 1; \ break; \ } \ tmp_file_buf = __buf; \ __pos = tmp_file_pos; \ __pos += snprintf(__buf + __pos, tmp_file_size - __pos, fmt);\ } \ tmp_file_pos = __pos; \ } while (0) static int tmp_file_record_size4(int fd) { unsigned int size = tmp_file_pos; if (tmp_file_error) return -1; if (write_and_check(fd, &size, 4)) return -1; return 0; } static int tmp_file_record_size8(int fd) { if (tmp_file_error) return -1; if (write_and_check(fd, &tmp_file_pos, 8)) return -1; return 0; } static int tmp_file_flush(int fd) { if (tmp_file_error) return -1; if (write_and_check(fd, tmp_file_buf, tmp_file_pos)) return -1; tmp_file_pos = 0; return 0; } static int save_initial_data(int fd) { int page_size; char buf[20]; if (write_and_check(fd, "\027\010\104tracing", 10)) return -1; if (write_and_check(fd, TRACE_CMD_FILE_VERSION_STRING, strlen(TRACE_CMD_FILE_VERSION_STRING) + 1)) return -1; /* Crash ensure core file endian and the host endian are the same */ if (host_bigendian()) buf[0] = 1; else buf[0] = 0; if (write_and_check(fd, buf, 1)) return -1; /* save size of long (this may not be what the kernel is) */ buf[0] = sizeof(long); if (write_and_check(fd, buf, 1)) return -1; page_size = PAGESIZE(); if (write_and_check(fd, &page_size, 4)) return -1; return 0; } static int save_header_files(int fd) { /* save header_page */ if (write_and_check(fd, "header_page", 12)) return -1; tmp_fprintf("\tfield: u64 timestamp;\toffset:0;\tsize:8;\tsigned:0;\n"); tmp_fprintf("\tfield: local_t commit;\toffset:8;\tsize:%u;\t" "signed:1;\n", (unsigned int)sizeof(long)); tmp_fprintf("\tfield: int overwrite;\toffset:8;\tsize:%u;\tsigned:1;\n", (unsigned int)sizeof(long)); 
	tmp_fprintf("\tfield: char data;\toffset:%u;\tsize:%u;\tsigned:1;\n",
	    (unsigned int)(8 + sizeof(long)),
	    (unsigned int)(PAGESIZE() - 8 - sizeof(long)));

	if (tmp_file_record_size8(fd))
		return -1;
	if (tmp_file_flush(fd))
		return -1;

	/* save header_event */
	if (write_and_check(fd, "header_event", 13))
		return -1;

	tmp_fprintf(
		"# compressed entry header\n"
		"\ttype_len : 5 bits\n"
		"\ttime_delta : 27 bits\n"
		"\tarray : 32 bits\n"
		"\n"
		"\tpadding : type == 29\n"
		"\ttime_extend : type == 30\n"
		"\tdata max type_len == 28\n"
	);

	if (tmp_file_record_size8(fd))
		return -1;
	if (tmp_file_flush(fd))
		return -1;

	return 0;
}

/*
 * Write one event's format description (name, ID, common fields, the
 * event's own fields, and the print format) in the same layout as
 * debugfs tracing/events/.../format, preceded by an 8-byte size.
 * Returns 0 on success, -1 on error.
 */
static int save_event_file(int fd, struct event_type *t)
{
	int i, nfields;
	struct ftrace_field *fields;
	int printed_common_field = 0;

	tmp_fprintf("name: %s\n", t->name);
	tmp_fprintf("ID: %d\n", t->id);
	tmp_fprintf("format:\n");

	/*
	 * First pass prints the common fields: either the global
	 * ftrace_common_fields array (newer kernels) or the trailing
	 * default_common_field_count entries of the event itself.
	 */
	if (ftrace_common_fields_count) {
		nfields = ftrace_common_fields_count;
		fields = ftrace_common_fields;
	} else {
		nfields = default_common_field_count;
		fields = &t->fields[t->nfields - nfields];
	}

again:
	/* Fields were stored in reverse order, so walk backwards. */
	for (i = nfields - 1; i >= 0; i--) {
		/*
		 * Smartly shows the array type(except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 */
		struct ftrace_field *field = &fields[i];
		const char *array_descriptor = strchr(field->type, '[');

		/* __data_loc types are dynamic arrays; print verbatim. */
		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			tmp_fprintf("\tfield:%s %s;\toffset:%u;"
			    "\tsize:%u;\tsigned:%d;\n",
			    field->type, field->name, field->offset,
			    field->size, !!field->is_signed);
		} else {
			/* Move the [LEN] suffix after the variable name. */
			tmp_fprintf("\tfield:%.*s %s%s;\toffset:%u;"
			    "\tsize:%u;\tsigned:%d;\n",
			    (int)(array_descriptor - field->type),
			    field->type, field->name, array_descriptor,
			    field->offset, field->size, !!field->is_signed);
		}
	}

	/* Second pass prints the event-specific fields. */
	if (!printed_common_field) {
		tmp_fprintf("\n");

		if (ftrace_common_fields_count)
			nfields = t->nfields;
		else
			nfields = t->nfields - default_common_field_count;
		fields = t->fields;

		printed_common_field = 1;
		goto again;
	}

	tmp_fprintf("\nprint fmt: %s\n", t->print_fmt);

	if (tmp_file_record_size8(fd))
		return -1;
	return tmp_file_flush(fd);
}

/*
 * Write the count of events belonging to system_id followed by each
 * such event's format file.  Returns 0 on success, -1 on error.
 */
static int save_system_files(int fd, int *system_ids, int system_id)
{
	int i, total = 0;

	for (i = 0; i < nr_event_types; i++) {
		if (system_ids[i] == system_id)
			total++;
	}

	if (write_and_check(fd, &total, 4))
		return -1;

	for (i = 0; i < nr_event_types; i++) {
		if (system_ids[i] != system_id)
			continue;
		if (save_event_file(fd, event_types[i]))
			return -1;
	}

	return 0;
}

/*
 * Group all event types by their "system" name and write them to the
 * trace.dat events sections: the "ftrace" system first (id 1), then
 * the count of remaining systems followed by each system's name and
 * events.  Returns 0 on success, -1 on error.
 */
static int save_events_files(int fd)
{
	int system_id = 1, *system_ids;
	const char *system = "ftrace";
	int i;
	int nr_systems;

	/*
	 * NOTE(review): argument order is (size, nmemb) rather than the
	 * conventional (nmemb, size); harmless, but worth confirming.
	 * Also, if nr_event_types is 0 this calloc may return NULL and
	 * be treated as failure — presumably never the case in practice.
	 */
	system_ids = calloc(sizeof(*system_ids), nr_event_types);
	if (system_ids == NULL)
		return -1;

	/*
	 * Repeated passes: each pass picks the first still-unassigned
	 * event's system (or "ftrace" on the first pass), tags every
	 * event of that system with the current system_id, and stops
	 * when a pass finds nothing left to assign.
	 */
	for (;;) {
		for (i = 0; i < nr_event_types; i++) {
			if (system_ids[i])
				continue;
			if (!system) {
				system = event_types[i]->system;
				system_ids[i] = system_id;
				continue;
			}
			if (!strcmp(event_types[i]->system, system))
				system_ids[i] = system_id;
		}

		if (!system)
			break;
		system_id++;
		system = NULL;
	}

	/* ftrace events */
	if (save_system_files(fd, system_ids, 1))
		goto fail;

	/* other systems events */
	nr_systems = system_id - 2;
	if
	    (write_and_check(fd, &nr_systems, 4))
		goto fail;
	for (system_id = 2; system_id < nr_systems + 2; system_id++) {
		/* Find one representative event to obtain the system name. */
		for (i = 0; i < nr_event_types; i++) {
			if (system_ids[i] == system_id)
				break;
		}
		if (write_and_check(fd, (void *)event_types[i]->system,
		    strlen(event_types[i]->system) + 1))
			goto fail;
		if (save_system_files(fd, system_ids, system_id))
			goto fail;
	}

	free(system_ids);
	return 0;

fail:
	free(system_ids);
	return -1;
}

/*
 * Write the kallsyms section: every kernel symbol, then every module
 * symbol (tagged with its module name), preceded by a 4-byte size.
 * The synthetic _MODULE_ start/end markers are skipped.
 * Returns 0 on success, -1 on error.
 */
static int save_proc_kallsyms(int fd)
{
	int i;
	struct syment *sp;

	for (sp = st->symtable; sp < st->symend; sp++)
		tmp_fprintf("%lx %c %s\n", sp->value, sp->type, sp->name);

	for (i = 0; i < st->mods_installed; i++) {
		struct load_module *lm = &st->load_modules[i];

		for (sp = lm->mod_symtable; sp <= lm->mod_symend; sp++) {
			if (!strncmp(sp->name, "_MODULE_", strlen("_MODULE_")))
				continue;
			tmp_fprintf("%lx %c %s\t[%s]\n", sp->value, sp->type,
			    sp->name, lm->mod_name);
		}
	}

	if (tmp_file_record_size4(fd))
		return -1;
	return tmp_file_flush(fd);
}

/*
 * Read the format string at the given kernel address and stage it as
 * one printk-formats line of the form:  0xADDR : "escaped string".
 * Newline, tab, backslash and double-quote are escaped so trace-cmd
 * can re-parse the string.  Returns 0 on success, -1 if the string
 * cannot be read.
 */
static int add_print_address(long address)
{
	char string[4096];
	size_t len;
	int i;

	len = read_string(address, string, sizeof(string));
	if (!len)
		return -1;

	tmp_fprintf("0x%lx : \"", address);
	for (i = 0; string[i]; i++) {
		switch (string[i]) {
		case '\n':
			tmp_fprintf("\\n");
			break;
		case '\t':
			tmp_fprintf("\\t");
			break;
		case '\\':
			tmp_fprintf("\\\\");
			break;
		case '"':
			tmp_fprintf("\\\"");
			break;
		default:
			tmp_fprintf("%c", string[i]);
		}
	}
	tmp_fprintf("\"\n");

	return 0;
}

/*
 * Write the trace_bprintk format strings section: the builtin formats
 * between __start/__stop___trace_bprintk_fmt, then any module formats
 * on trace_bprintk_fmt_list.  An empty section is written as a zero
 * 4-byte size.  Returns 0 on success, -1 on error.
 */
static int save_ftrace_printk(int fd)
{
	struct kernel_list_head *mod_fmt;
	struct syment *s, *e, *b;
	long bprintk_fmt_s, bprintk_fmt_e;
	long *address;
	size_t i, count;
	int addr_is_array = 0;

	s = symbol_search("__start___trace_bprintk_fmt");
	e = symbol_search("__stop___trace_bprintk_fmt");
	if (s == NULL || e == NULL)
		return -1;

	bprintk_fmt_s = s->value;
	bprintk_fmt_e = e->value;
	count = (bprintk_fmt_e - bprintk_fmt_s) / sizeof(long);
	if (count == 0)
		goto do_mods;

	address = malloc(count * sizeof(long));
	if (address == NULL)
		return -1;

	if (!readmem(bprintk_fmt_s, KVADDR, address, count * sizeof(long),
	    "get printk address", RETURN_ON_ERROR)) {
		free(address);
		return -1;
	}

	for (i = 0; i < count; i++) {
		if (add_print_address(address[i]) < 0) {
			free(address);
			return -1;
		}
	}

	free(address);

do_mods:
	/* Add modules */
	b = symbol_search("trace_bprintk_fmt_list");
	if (!b)
		goto out;

	/*
	 * The "fmt" member is either an inline char array (address is
	 * the entry itself) or a pointer that must be dereferenced.
	 */
	switch (MEMBER_TYPE("trace_bprintk_fmt", "fmt")) {
	case TYPE_CODE_ARRAY:
		addr_is_array = 1;
		break;
	case TYPE_CODE_PTR:
	default:
		/* default not array */
		break;
	}

	mod_fmt = (struct kernel_list_head *)GETBUF(SIZE(list_head));
	if (!readmem(b->value, KVADDR, mod_fmt, SIZE(list_head),
	    "trace_bprintk_fmt_list contents", RETURN_ON_ERROR))
		goto out_free;

	/* Walk the circular list until it wraps back to the head. */
	while ((unsigned long)mod_fmt->next != b->value) {
		unsigned long addr;

		/* The fmt member immediately follows the list_head. */
		addr = (unsigned long)mod_fmt->next + SIZE(list_head);
		if (!addr_is_array) {
			if (!readmem(addr, KVADDR, &addr, sizeof(addr),
			    "trace_bprintk_fmt_list fmt field",
			    RETURN_ON_ERROR))
				goto out_free;
		}

		if (!readmem((unsigned long)mod_fmt->next, KVADDR, mod_fmt,
		    SIZE(list_head), "trace_bprintk_fmt_list contents",
		    RETURN_ON_ERROR))
			goto out_free;

		if (add_print_address(addr) < 0)
			goto out_free;
		count++;
	}

out_free:
	FREEBUF(mod_fmt);
out:
	if (count == 0) {
		unsigned int size = 0;
		return write_and_check(fd, &size, 4);
	}
	if (tmp_file_record_size4(fd))
		return -1;
	return tmp_file_flush(fd);
}

/*
 * Write the saved-cmdlines section: one "PID COMM" line per task known
 * to crash, preceded by an 8-byte size.  Returns 0 or -1.
 */
static int save_ftrace_cmdlines(int fd)
{
	int i;
	struct task_context *tc = FIRST_CONTEXT();

	for (i = 0; i < RUNNING_TASKS(); i++)
		tmp_fprintf("%d %s\n", (int)tc[i].pid, tc[i].comm);

	if (tmp_file_record_size8(fd))
		return -1;
	return tmp_file_flush(fd);
}

/*
 * Write the residual trace.dat records preceding the ring buffer data:
 * CPU buffer count, an empty "options" section, and the "flyrecord"
 * marker.  Returns 0 on success, -1 on error.
 */
static int save_res_data(int fd, int nr_cpu_buffers)
{
	unsigned short option = 0;

	if (write_and_check(fd, &nr_cpu_buffers, 4))
		return -1;

	/*
	 * NOTE(review): 10 bytes are written but the literal as shown
	 * supplies only 9 ("options " + NUL) — the trace.dat format
	 * expects "options  " with two spaces; the second space was
	 * presumably lost in transcription.  Verify against trace-cmd.
	 */
	if (write_and_check(fd, "options ", 10))
		return -1;

	if (write_and_check(fd, &option, 2))
		return -1;

	if (write_and_check(fd, "flyrecord", 10))
		return -1;

	return 0;
}

/*
 * Write the flyrecord tables (per-CPU offset/size pairs, page aligned)
 * followed by the raw ring buffer pages themselves.
 * Returns 0 on success, -1 on error.
 */
static int save_record_data(int fd, int nr_cpu_buffers)
{
	int i, j;
	unsigned long long offset,
buffer_offset; void *page_tmp; offset = lseek(fd, 0, SEEK_CUR); offset += nr_cpu_buffers * 16; offset = (offset + (PAGESIZE() - 1)) & ~(PAGESIZE() - 1); buffer_offset = offset; for (i = 0; i < nr_cpu_ids; i++) { struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; unsigned long long buffer_size; if (!cpu_buffer->kaddr) continue; buffer_size = PAGESIZE() * cpu_buffer->nr_linear_pages; if (write_and_check(fd, &buffer_offset, 8)) return -1; if (write_and_check(fd, &buffer_size, 8)) return -1; buffer_offset += buffer_size; } page_tmp = malloc(PAGESIZE()); if (page_tmp == NULL) return -1; lseek(fd, offset, SEEK_SET); for (i = 0; i < nr_cpu_ids; i++) { struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; if (!cpu_buffer->kaddr) continue; for (j = 0; j < cpu_buffer->nr_linear_pages; j++) { if (ftrace_dump_page(fd, cpu_buffer->linear_pages[j], page_tmp) < 0) { free(page_tmp); return -1; } } } free(page_tmp); return 0; } static int __trace_cmd_data_output(int fd) { int i; int nr_cpu_buffers = 0; for (i = 0; i < nr_cpu_ids; i++) { struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i]; if (!cpu_buffer->kaddr) continue; nr_cpu_buffers++; } if (save_initial_data(fd)) return -1; if (save_header_files(fd)) return -1; if (save_events_files(fd)) /* ftrace events and other systems events */ return -1; if (save_proc_kallsyms(fd)) return -1; if (save_ftrace_printk(fd)) return -1; if (save_ftrace_cmdlines(fd)) return -1; if (save_res_data(fd, nr_cpu_buffers)) return -1; if (save_record_data(fd, nr_cpu_buffers)) return -1; return 0; } static int trace_cmd_data_output(int fd) { int ret; if (init_tmp_file()) return -1; ret = __trace_cmd_data_output(fd); destory_tmp_file(); return ret; } crash-7.1.4/extensions/eppic.c0000664000000000000000000000021312634305150014761 0ustar rootroot/* Place holder for proper working of the extension Makefile. 
Eppic crash application file is in eppic/applications/crash/eppic.c */ crash-7.1.4/extensions/snap.mk0000664000000000000000000000257712634305150015026 0ustar rootroot# # Copyright (C) 2009, 2011, 2013 David Anderson # Copyright (C) 2009, 2011, 2013 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # ifeq ($(shell arch), i686) TARGET=X86 TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64 endif ifeq ($(shell arch), ppc64) TARGET=PPC64 TARGET_CFLAGS=-m64 endif ifeq ($(shell arch), ppc64le) TARGET=PPC64 TARGET_CFLAGS=-m64 endif ifeq ($(shell arch), ia64) TARGET=IA64 TARGET_CFLAGS= endif ifeq ($(shell arch), x86_64) TARGET=X86_64 TARGET_CFLAGS= endif ifeq ($(shell /bin/ls /usr/include/crash/defs.h 2>/dev/null), /usr/include/crash/defs.h) INCDIR=/usr/include/crash endif ifeq ($(shell /bin/ls ../defs.h 2> /dev/null), ../defs.h) INCDIR=.. endif ifeq ($(shell /bin/ls ./defs.h 2> /dev/null), ./defs.h) INCDIR=. endif all: snap.so snap.so: $(INCDIR)/defs.h snap.c gcc -Wall -g -I$(INCDIR) -shared -rdynamic -o snap.so snap.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS) crash-7.1.4/dev.c0000775000000000000000000037537112634305150012267 0ustar rootroot/* dev.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" static void dump_blkdevs(ulong); static void dump_chrdevs(ulong); static void dump_blkdevs_v2(ulong); static void dump_blkdevs_v3(ulong); static ulong search_cdev_map_probes(char *, int, int, ulong *); static ulong search_bdev_map_probes(char *, int, int, ulong *); static void do_pci(void); static void do_io(void); static void do_resource_list(ulong, char *, int); static const char *pci_strclass (uint, char *); static const char *pci_strvendor(uint, char *); static const char *pci_strdev(uint, uint, char *); static void diskio_option(void); static struct dev_table { ulong flags; } dev_table = { 0 }; struct dev_table *dt = &dev_table; #define DEV_INIT 0x1 #define DISKIO_INIT 0x2 void dev_init(void) { MEMBER_OFFSET_INIT(pci_dev_global_list, "pci_dev", "global_list"); MEMBER_OFFSET_INIT(pci_dev_next, "pci_dev", "next"); MEMBER_OFFSET_INIT(pci_dev_bus, "pci_dev", "bus"); MEMBER_OFFSET_INIT(pci_dev_devfn, "pci_dev", "devfn"); MEMBER_OFFSET_INIT(pci_dev_class, "pci_dev", "class"); MEMBER_OFFSET_INIT(pci_dev_device, "pci_dev", "device"); MEMBER_OFFSET_INIT(pci_dev_vendor, "pci_dev", "vendor"); MEMBER_OFFSET_INIT(pci_bus_number, "pci_bus", "number"); STRUCT_SIZE_INIT(resource, "resource"); if ((VALID_STRUCT(resource) && symbol_exists("do_resource_list")) || (VALID_STRUCT(resource) && symbol_exists("iomem_resource") && symbol_exists("ioport_resource"))) { MEMBER_OFFSET_INIT(resource_name, "resource", "name"); MEMBER_OFFSET_INIT(resource_start, "resource", "start"); 
MEMBER_OFFSET_INIT(resource_end, "resource", "end"); MEMBER_OFFSET_INIT(resource_sibling, "resource", "sibling"); MEMBER_OFFSET_INIT(resource_child, "resource", "child"); } else { STRUCT_SIZE_INIT(resource_entry_t, "resource_entry_t"); if (VALID_SIZE(resource_entry_t)) { MEMBER_OFFSET_INIT(resource_entry_t_from, "resource_entry_t", "from"); MEMBER_OFFSET_INIT(resource_entry_t_num, "resource_entry_t", "num"); MEMBER_OFFSET_INIT(resource_entry_t_name, "resource_entry_t", "name"); MEMBER_OFFSET_INIT(resource_entry_t_next, "resource_entry_t", "next"); } } dt->flags |= DEV_INIT; } /* * Generic command for character and block device data. */ void cmd_dev(void) { int c; ulong flags; flags = 0; while ((c = getopt(argcnt, args, "dpi")) != EOF) { switch(c) { case 'd': diskio_option(); return; case 'i': if (machine_type("S390X")) option_not_supported(c); do_io(); return; case 'p': if (machine_type("S390X") || (THIS_KERNEL_VERSION >= LINUX(2,6,26))) option_not_supported(c); do_pci(); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); dump_chrdevs(flags); fprintf(fp, "\n"); dump_blkdevs(flags); } #define MAX_DEV (255) #define MINORBITS 20 #define MINORMASK ((1U << MINORBITS) - 1) #define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) #define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) char *chrdev_hdr = "CHRDEV NAME "; char *blkdev_hdr = "BLKDEV NAME "; /* * Dump the character device data. */ static void dump_chrdevs(ulong flags) { int i; ulong addr, size; char buf[BUFSIZE]; char buf2[BUFSIZE]; struct chrdevs { ulong name; ulong ops; } chrdevs[MAX_DEV], *cp; ulong *cdp; char *char_device_struct_buf; ulong next, savenext, name, fops, cdev; int major, minor; int name_typecode; size_t name_size; if (!symbol_exists("chrdevs")) error(FATAL, "chrdevs: symbol does not exist\n"); addr = symbol_value("chrdevs"); size = VALID_STRUCT(char_device_struct) ? 
sizeof(void *) : sizeof(struct chrdevs); readmem(addr, KVADDR, &chrdevs[0], size * MAX_DEV, "chrdevs array", FAULT_ON_ERROR); fprintf(fp, "%s %s", chrdev_hdr, VADDR_PRLEN == 8 ? " " : ""); fprintf(fp, "%s ", mkstring(buf, VADDR_PRLEN, CENTER, "CDEV")); fprintf(fp, "%s\n", mkstring(buf, VADDR_PRLEN, LJUST, "OPERATIONS")); if (VALID_STRUCT(char_device_struct)) goto char_device_struct; for (i = 0, cp = &chrdevs[0]; i < MAX_DEV; i++, cp++) { if (!cp->ops) continue; fprintf(fp, " %3d ", i); if (cp->name) { if (read_string(cp->name, buf, BUFSIZE-1)) fprintf(fp, "%-11s ", buf); else fprintf(fp, "%-11s ", "(unknown)"); } else fprintf(fp, "%-11s ", "(unknown)"); sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf, cp->ops); value_to_symstr(cp->ops, buf, 0); if (strlen(buf)) fprintf(fp, "<%s>", buf); fprintf(fp, "\n"); } return; char_device_struct: char_device_struct_buf = GETBUF(SIZE(char_device_struct)); cdp = (ulong *)&chrdevs[0]; name_typecode = MEMBER_TYPE("char_device_struct", "name"); name_size = (size_t)MEMBER_SIZE("char_device_struct", "name"); for (i = 0; i < MAX_DEV; i++, cdp++) { if (!(*cdp)) continue; readmem(*cdp, KVADDR, char_device_struct_buf, SIZE(char_device_struct), "char_device_struct", FAULT_ON_ERROR); next = ULONG(char_device_struct_buf + OFFSET(char_device_struct_next)); name = ULONG(char_device_struct_buf + OFFSET(char_device_struct_name)); switch (name_typecode) { case TYPE_CODE_ARRAY: snprintf(buf, name_size, "%s", char_device_struct_buf + OFFSET(char_device_struct_name)); break; case TYPE_CODE_PTR: default: if (!name || !read_string(name, buf, BUFSIZE-1)) break; } major = INT(char_device_struct_buf + OFFSET(char_device_struct_major)); minor = INT(char_device_struct_buf + OFFSET(char_device_struct_baseminor)); cdev = fops = 0; if (VALID_MEMBER(char_device_struct_cdev) && VALID_STRUCT(cdev)) { cdev = ULONG(char_device_struct_buf + OFFSET(char_device_struct_cdev)); if (cdev) { addr = cdev + 
OFFSET(cdev_ops); readmem(addr, KVADDR, &fops, sizeof(void *), "cdev ops", FAULT_ON_ERROR); } } else { fops = ULONG(char_device_struct_buf + OFFSET(char_device_struct_fops)); } if (!fops) fops = search_cdev_map_probes(buf, major, minor, &cdev); if (!fops) { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ", mkstring(buf, VADDR_PRLEN, CENTER, "(none)")); } else { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); sprintf(buf2, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf2, cdev); value_to_symstr(fops, buf2, 0); if (strlen(buf2)) fprintf(fp, "%s", buf2); else fprintf(fp, "%lx", fops); fprintf(fp, "\n"); } if (CRASHDEBUG(1)) fprintf(fp, "%lx: major: %d minor: %d name: %s next: %lx cdev: %lx fops: %lx\n", *cdp, major, minor, buf, next, cdev, fops); while (next) { readmem(savenext = next, KVADDR, char_device_struct_buf, SIZE(char_device_struct), "char_device_struct", FAULT_ON_ERROR); next = ULONG(char_device_struct_buf + OFFSET(char_device_struct_next)); name = ULONG(char_device_struct_buf + OFFSET(char_device_struct_name)); switch (name_typecode) { case TYPE_CODE_ARRAY: snprintf(buf, name_size, "%s", char_device_struct_buf + OFFSET(char_device_struct_name)); break; case TYPE_CODE_PTR: default: if (!name || !read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); break; } major = INT(char_device_struct_buf + OFFSET(char_device_struct_major)); minor = INT(char_device_struct_buf + OFFSET(char_device_struct_baseminor)); fops = cdev = 0; if (VALID_MEMBER(char_device_struct_cdev) && VALID_STRUCT(cdev)) { cdev = ULONG(char_device_struct_buf + OFFSET(char_device_struct_cdev)); if (cdev) { addr = cdev + OFFSET(cdev_ops); readmem(addr, KVADDR, &fops, sizeof(void *), "cdev ops", FAULT_ON_ERROR); } } else { fops = ULONG(char_device_struct_buf + OFFSET(char_device_struct_fops)); } if (!fops) fops = search_cdev_map_probes(buf, major, minor, &cdev); if (!fops) { 
fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ", mkstring(buf, VADDR_PRLEN, CENTER, "(none)")); } else { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); sprintf(buf2, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf2, cdev); value_to_symstr(fops, buf2, 0); if (strlen(buf2)) fprintf(fp, "%s", buf2); else fprintf(fp, "%lx", fops); fprintf(fp, "\n"); } if (CRASHDEBUG(1)) fprintf(fp, "%lx: major: %d minor: %d name: %s next: %lx cdev: %lx fops: %lx\n", savenext, major, minor, buf, next, cdev, fops); } } FREEBUF(char_device_struct_buf); } /* * Search for a major/minor match by following the list headed * by the kobj_map.probes[major] array entry. The "data" member * points to a cdev structure containing the file_operations * pointer. */ static ulong search_cdev_map_probes(char *name, int major, int minor, ulong *cdev) { char *probe_buf; ulong probes[MAX_DEV]; ulong cdev_map, addr, next, ops, probe_data; uint probe_dev; if (kernel_symbol_exists("cdev_map")) get_symbol_data("cdev_map", sizeof(ulong), &cdev_map); else return 0; addr = cdev_map + OFFSET(kobj_map_probes); if (!readmem(addr, KVADDR, &probes[0], sizeof(void *) * MAX_DEV, "cdev_map.probes[]", QUIET|RETURN_ON_ERROR)) return 0; ops = 0; probe_buf = GETBUF(SIZE(probe)); next = probes[major]; while (next) { if (!readmem(next, KVADDR, probe_buf, SIZE(probe), "struct probe", QUIET|RETURN_ON_ERROR)) break; probe_dev = UINT(probe_buf + OFFSET(probe_dev)); if ((MAJOR(probe_dev) == major) && (MINOR(probe_dev) == minor)) { probe_data = ULONG(probe_buf + OFFSET(probe_data)); addr = probe_data + OFFSET(cdev_ops); if (!readmem(addr, KVADDR, &ops, sizeof(void *), "cdev ops", QUIET|RETURN_ON_ERROR)) ops = 0; else *cdev = probe_data; break; } next = ULONG(probe_buf + OFFSET(probe_next)); } FREEBUF(probe_buf); return ops; } /* * Dump the block device data. 
*/ static void dump_blkdevs(ulong flags) { int i; ulong addr; char buf[BUFSIZE]; struct blkdevs { ulong name; ulong ops; } blkdevs[MAX_DEV], *bp; if (kernel_symbol_exists("major_names") && kernel_symbol_exists("bdev_map")) { dump_blkdevs_v3(flags); return; } if (symbol_exists("all_bdevs")) { dump_blkdevs_v2(flags); return; } if (!symbol_exists("blkdevs")) error(FATAL, "blkdevs or all_bdevs: symbols do not exist\n"); addr = symbol_value("blkdevs"); readmem(addr, KVADDR, &blkdevs[0], sizeof(struct blkdevs) * MAX_DEV, "blkdevs array", FAULT_ON_ERROR); fprintf(fp, "%s%s\n", blkdev_hdr, mkstring(buf, VADDR_PRLEN, CENTER, "OPERATIONS")); for (i = 0, bp = &blkdevs[0]; i < MAX_DEV; i++, bp++) { if (!bp->ops) continue; fprintf(fp, " %3d ", i); if (bp->name) { if (read_string(bp->name, buf, BUFSIZE-1)) fprintf(fp, "%-11s ", buf); else fprintf(fp, "%-11s ", "(unknown)"); } else fprintf(fp, "%-11s ", "(unknown)"); sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf, bp->ops); value_to_symstr(bp->ops, buf, 0); if (strlen(buf)) fprintf(fp, "<%s>", buf); fprintf(fp, "\n"); } } /* * block device dump for 2.6 */ static void dump_blkdevs_v2(ulong flags) { struct list_data list_data, *ld; ulong *major_fops, *bdevlist, *gendisklist, *majorlist; int i, j, bdevcnt, len; char *block_device_buf, *gendisk_buf, *blk_major_name_buf; ulong next, savenext, fops; int major, total; char buf[BUFSIZE]; if (!symbol_exists("major_names")) error(FATAL, "major_names[] array doesn't exist in this kernel\n"); len = get_array_length("major_names", NULL, 0); block_device_buf = GETBUF(SIZE(block_device)); gendisk_buf = GETBUF(SIZE(gendisk)); ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data("all_bdevs", sizeof(void *), &ld->start); ld->end = symbol_value("all_bdevs"); ld->list_head_offset = OFFSET(block_device_bd_list); hq_open(); bdevcnt = do_list(ld); bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong)); gendisklist = (ulong 
*)GETBUF(bdevcnt * sizeof(ulong)); bdevcnt = retrieve_list(bdevlist, bdevcnt); hq_close(); total = MAX(len, bdevcnt); major_fops = (ulong *)GETBUF(sizeof(void *) * total); /* * go through the block_device list, emulating: * * ret += bdev->bd_inode->i_mapping->nrpages; */ for (i = 0; i < bdevcnt; i++) { readmem(bdevlist[i], KVADDR, block_device_buf, SIZE(block_device), "block_device buffer", FAULT_ON_ERROR); gendisklist[i] = ULONG(block_device_buf + OFFSET(block_device_bd_disk)); if (CRASHDEBUG(1)) fprintf(fp, "[%d] %lx -> %lx\n", i, bdevlist[i], gendisklist[i]); } for (i = 1; i < bdevcnt; i++) { for (j = 0; j < i; j++) { if (gendisklist[i] == gendisklist[j]) gendisklist[i] = 0; } } for (i = 0; i < bdevcnt; i++) { if (!gendisklist[i]) continue; readmem(gendisklist[i], KVADDR, gendisk_buf, SIZE(gendisk), "gendisk buffer", FAULT_ON_ERROR); fops = ULONG(gendisk_buf + OFFSET(gendisk_fops)); major = UINT(gendisk_buf + OFFSET(gendisk_major)); strncpy(buf, gendisk_buf + OFFSET(gendisk_disk_name), 32); if (CRASHDEBUG(1)) fprintf(fp, "%lx: name: [%s] major: %d fops: %lx\n", gendisklist[i], buf, major, fops); if (fops && (major < total)) major_fops[major] = fops; } FREEBUF(bdevlist); FREEBUF(gendisklist); FREEBUF(block_device_buf); FREEBUF(gendisk_buf); if (CRASHDEBUG(1)) fprintf(fp, "major_names[%d]\n", len); majorlist = (ulong *)GETBUF(len * sizeof(void *)); blk_major_name_buf = GETBUF(SIZE(blk_major_name)); readmem(symbol_value("major_names"), KVADDR, &majorlist[0], sizeof(void *) * len, "major_names array", FAULT_ON_ERROR); fprintf(fp, "%s%s\n", blkdev_hdr, mkstring(buf, VADDR_PRLEN, CENTER, "OPERATIONS")); for (i = 0; i < len; i++) { if (!majorlist[i]) continue; readmem(majorlist[i], KVADDR, blk_major_name_buf, SIZE(blk_major_name), "blk_major_name buffer", FAULT_ON_ERROR); major = UINT(blk_major_name_buf + OFFSET(blk_major_name_major)); buf[0] = NULLCHAR; strncpy(buf, blk_major_name_buf + OFFSET(blk_major_name_name), 16); next = ULONG(blk_major_name_buf + 
OFFSET(blk_major_name_next)); if (CRASHDEBUG(1)) fprintf(fp, "[%d] %lx major: %d name: %s next: %lx fops: %lx\n", i, majorlist[i], major, buf, next, major_fops[major]); fprintf(fp, " %3d ", major); fprintf(fp, "%-12s ", strlen(buf) ? buf : "(unknown)"); if (major_fops[major]) { sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf, major_fops[major]); value_to_symstr(major_fops[major], buf, 0); if (strlen(buf)) fprintf(fp, "<%s>", buf); } else fprintf(fp, " (unknown)"); fprintf(fp, "\n"); while (next) { readmem(savenext = next, KVADDR, blk_major_name_buf, SIZE(blk_major_name), "blk_major_name buffer", FAULT_ON_ERROR); major = UINT(blk_major_name_buf + OFFSET(blk_major_name_major)); strncpy(buf, blk_major_name_buf + OFFSET(blk_major_name_name), 16); next = ULONG(blk_major_name_buf + OFFSET(blk_major_name_next)); if (CRASHDEBUG(1)) fprintf(fp, "[%d] %lx major: %d name: %s next: %lx fops: %lx\n", i, savenext, major, buf, next, major_fops[major]); fprintf(fp, " %3d ", major); fprintf(fp, "%-12s ", strlen(buf) ? buf : "(unknown)"); if (major_fops[major]) { sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ", VADDR_PRLEN); fprintf(fp, buf, major_fops[major]); value_to_symstr(major_fops[major], buf, 0); if (strlen(buf)) fprintf(fp, "<%s>", buf); } else fprintf(fp, " (unknown)"); fprintf(fp, "\n"); } } FREEBUF(majorlist); FREEBUF(major_fops); FREEBUF(blk_major_name_buf); } static void dump_blkdevs_v3(ulong flags) { int i, len; ulong blk_major_name; char *blk_major_name_buf; char buf[BUFSIZE]; uint major; ulong gendisk, addr, fops; if (!(len = get_array_length("major_names", NULL, 0))) len = MAX_DEV; fprintf(fp, "%s %s", blkdev_hdr, VADDR_PRLEN == 8 ? 
" " : ""); fprintf(fp, "%s ", mkstring(buf, VADDR_PRLEN, CENTER|RJUST, "GENDISK")); fprintf(fp, "%s\n", mkstring(buf, VADDR_PRLEN, LJUST, "OPERATIONS")); blk_major_name_buf = GETBUF(SIZE(blk_major_name)); gendisk = 0; for (i = 0; i < len; i++) { addr = symbol_value("major_names") + (i * sizeof(void *)); readmem(addr, KVADDR, &blk_major_name, sizeof(void *), "major_names[] entry", FAULT_ON_ERROR); if (!blk_major_name) continue; readmem(blk_major_name, KVADDR, blk_major_name_buf, SIZE(blk_major_name), "blk_major_name", FAULT_ON_ERROR); major = UINT(blk_major_name_buf + OFFSET(blk_major_name_major)); buf[0] = NULLCHAR; strncpy(buf, blk_major_name_buf + OFFSET(blk_major_name_name), 16); fops = search_bdev_map_probes(buf, major == i ? major : i, UNUSED, &gendisk); if (CRASHDEBUG(1)) fprintf(fp, "blk_major_name: %lx block major: %d name: %s gendisk: %lx fops: %lx\n", blk_major_name, major, buf, gendisk, fops); if (!fops) { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", strlen(buf) ? buf : "(unknown)"); fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ", mkstring(buf, VADDR_PRLEN, CENTER, "(none)")); continue; } fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", strlen(buf) ? buf : "(unknown)"); sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? 
	    " " : " ", VADDR_PRLEN);
		fprintf(fp, buf, gendisk);
		value_to_symstr(fops, buf, 0);
		if (strlen(buf))
			fprintf(fp, "%s", buf);
		else
			fprintf(fp, "%lx", fops);
		fprintf(fp, "\n");
	}
}

/*
 * Follow the bdev_map.probes[major] list looking for a probe entry
 * whose data points to a gendisk with a non-NULL fops.  On a match,
 * returns the fops value and stores the gendisk address via *gendisk;
 * returns 0 if nothing is found.  The minor argument is unused
 * (callers pass UNUSED); matching is by major number only.
 */
static ulong
search_bdev_map_probes(char *name, int major, int minor, ulong *gendisk)
{
	char *probe_buf, *gendisk_buf;
	ulong probes[MAX_DEV];
	ulong bdev_map, addr, next, probe_data, fops;
	uint probe_dev;

	get_symbol_data("bdev_map", sizeof(ulong), &bdev_map);
	addr = bdev_map + OFFSET(kobj_map_probes);

	if (!readmem(addr, KVADDR, &probes[0], sizeof(void *) * MAX_DEV,
	    "bdev_map.probes[]", QUIET|RETURN_ON_ERROR))
		return 0;

	probe_buf = GETBUF(SIZE(probe));
	gendisk_buf = GETBUF(SIZE(gendisk));
	fops = 0;

	/* Walk the singly-linked probe chain for this major. */
	for (next = probes[major]; next;
	     next = ULONG(probe_buf + OFFSET(probe_next))) {
		if (!readmem(next, KVADDR, probe_buf, SIZE(probe),
		    "struct probe", QUIET|RETURN_ON_ERROR))
			break;
		probe_data = ULONG(probe_buf + OFFSET(probe_data));
		if (!probe_data)
			continue;
		probe_dev = UINT(probe_buf + OFFSET(probe_dev));
		if (MAJOR(probe_dev) != major)
			continue;
		/* probe->data is a gendisk; grab its fops pointer. */
		if (!readmem(probe_data, KVADDR, gendisk_buf, SIZE(gendisk),
		    "gendisk buffer", QUIET|RETURN_ON_ERROR))
			break;
		fops = ULONG(gendisk_buf + OFFSET(gendisk_fops));
		if (fops) {
			*gendisk = probe_data;
			break;
		}
	}

	FREEBUF(probe_buf);
	FREEBUF(gendisk_buf);

	return fops;
}

/*
 * Debug helper ("help -d"): display the dev_table flags symbolically.
 * Note: the local dt intentionally shadows the file-scope dt pointer;
 * both refer to the same dev_table.
 */
void
dump_dev_table(void)
{
	struct dev_table *dt;
	int others;

	dt = &dev_table;
	others = 0;

	fprintf(fp, " flags: %lx (", dt->flags);
	if (dt->flags & DEV_INIT)
		fprintf(fp, "%sDEV_INIT", others++ ? "|" : "");
	if (dt->flags & DISKIO_INIT)
		fprintf(fp, "%sDISKIO_INIT", others++ ? "|" : "");
	fprintf(fp, ")\n");
}

/*
 * Dump the I/O ports.
 */
/*
 * Dump the system's I/O port and I/O memory resource information.
 * Three kernel vintages are handled: 2.2 kernels expose a flat
 * resource_entry_t list reachable from "iolist"; 2.4 and later kernels
 * provide the "ioport_resource"/"iomem_resource" trees, which are
 * walked recursively by do_resource_list().  Output goes to fp.
 */
static void
do_io(void)
{
	int i, c, len, wrap, cnt, size;
	ulong *resource_list, name, start, end;
	char *resource_buf, *p1;
	struct list_data list_data, *ld;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	/* Pick the traversal style based upon which symbols exist. */
	if (symbol_exists("get_ioport_list")) /* linux 2.2 */
		goto ioport_list;
	if (symbol_exists("do_resource_list")) /* linux 2.4 */
		goto resource_list;
	if (symbol_exists("iomem_resource") && symbol_exists("ioport_resource"))
		goto resource_list;
	return;

ioport_list:
	/*
	 * ioport
	 */
	fprintf(fp, "%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, 9, CENTER|LJUST, "RANGE"));
	/* Column count consumed before the NAME field begins. */
	wrap = VADDR_PRLEN + 2 + 9 + 2;
	resource_buf = GETBUF(SIZE(resource_entry_t));

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	/* NOTE(review): hard-coded kernel address, immediately overwritten
	 * by the readmem() below -- appears to be leftover debug code. */
	ld->start = 0xc026cf20;
	readmem(symbol_value("iolist") + OFFSET(resource_entry_t_next),
		KVADDR, &ld->start, sizeof(void *), "iolist.next",
		FAULT_ON_ERROR);
	ld->member_offset = OFFSET(resource_entry_t_next);

	hq_open();
	cnt = do_list(ld);
	/* NOTE(review): this early return leaves the hash queue open and
	 * resource_buf unfreed (GETBUF with no matching FREEBUF). */
	if (!cnt)
		return;
	resource_list = (ulong *)GETBUF(cnt * sizeof(ulong));
	cnt = retrieve_list(resource_list, cnt);
	hq_close();

	for (i = 0; i < cnt; i++) {
		fprintf(fp, "%lx ", resource_list[i]);
		readmem(resource_list[i], KVADDR, resource_buf,
			SIZE(resource_entry_t), "resource_entry_t",
			FAULT_ON_ERROR);
		/* resource_entry_t stores (from, num); end = from + num. */
		start = ULONG(resource_buf + OFFSET(resource_entry_t_from));
		end = ULONG(resource_buf + OFFSET(resource_entry_t_num));
		end += start;
		fprintf(fp, "%04lx-%04lx ", start, end);
		name = ULONG(resource_buf + OFFSET(resource_entry_t_name));
		if (!read_string(name, buf1, BUFSIZE-1))
			sprintf(buf1, "(unknown)");
		if (wrap + strlen(buf1) <= 80)
			fprintf(fp, "%s\n", buf1);
		else {
			/*
			 * Name overflows an 80-column line: split it at a
			 * blank far enough in, and continue on a padded
			 * second line.
			 */
			len = wrap + strlen(buf1) - 80;
			for (c = 0, p1 = &buf1[strlen(buf1)-1];
			     p1 > buf1; p1--, c++) {
				if (*p1 != ' ')
					continue;
				if (c >= len) {
					*p1 = NULLCHAR;
					break;
				}
			}
			fprintf(fp, "%s\n", buf1);
			if (*p1 == NULLCHAR) {
				pad_line(fp, wrap, ' ');
				fprintf(fp, "%s\n", p1+1);
			}
		}
	}
	return;

resource_list:
	resource_buf = GETBUF(SIZE(resource));

	/*
	 * ioport
	 */
	readmem(symbol_value("ioport_resource") + OFFSET(resource_end),
		KVADDR, &end, sizeof(long), "ioport_resource.end",
		FAULT_ON_ERROR);
	/* 8 hex digits per bound if the range exceeds 0xffff, else 4. */
	size = (end > 0xffff) ? 8 : 4;
	fprintf(fp, "%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, (size*2) + 1, CENTER|LJUST, "RANGE"));
	do_resource_list(symbol_value("ioport_resource"), resource_buf, size);

	/*
	 * iomem
	 */
	readmem(symbol_value("iomem_resource") + OFFSET(resource_end),
		KVADDR, &end, sizeof(long), "iomem_resource.end",
		FAULT_ON_ERROR);
	size = (end > 0xffff) ? 8 : 4;
	fprintf(fp, "\n%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, (size*2) + 1, CENTER|LJUST, "RANGE"));
	do_resource_list(symbol_value("iomem_resource"), resource_buf, size);

	return;
}

/*
 * Recursively display a 2.4+ resource tree: for each entry print its
 * address, start-end range and name (wrapping names that would exceed
 * an 80-column line), then descend into the child subtree and continue
 * along the sibling chain.  resource_buf is a caller-supplied scratch
 * buffer of SIZE(resource) bytes, reused across the recursion.
 */
static void
do_resource_list(ulong first_entry, char *resource_buf, int size)
{
	ulong entry, name, start, end, child, sibling;
	int c, wrap, len;
	char buf1[BUFSIZE];
	char *fmt, *p1;

	fmt = NULL;
	switch (size)
	{
	case 4:
		fmt = "%8lx %04lx-%04lx";
		break;
	case 8:
		fmt = "%8lx %08lx-%08lx";
		break;
	}

	wrap = VADDR_PRLEN + 2 + ((size*2)+1) + 2;

	entry = first_entry;

	while (entry) {
		readmem(entry, KVADDR, resource_buf, SIZE(resource),
			"resource", FAULT_ON_ERROR);
		start = ULONG(resource_buf + OFFSET(resource_start));
		end = ULONG(resource_buf + OFFSET(resource_end));
		name = ULONG(resource_buf + OFFSET(resource_name));
		child = ULONG(resource_buf + OFFSET(resource_child));
		sibling = ULONG(resource_buf + OFFSET(resource_sibling));

		if (!read_string(name, buf1, BUFSIZE-1))
			sprintf(buf1, "(unknown)");

		fprintf(fp, fmt, entry, start, end);
		if (wrap + strlen(buf1) <= 80)
			fprintf(fp, " %s\n", buf1);
		else {
			/* Same 80-column name-wrapping scheme as do_io(). */
			len = wrap + strlen(buf1) - 80;
			for (c = 0, p1 = &buf1[strlen(buf1)-1];
			     p1 > buf1; p1--, c++) {
				if (*p1 != ' ')
					continue;
				if (c >= len) {
					*p1 = NULLCHAR;
					break;
				}
			}
			fprintf(fp, " %s\n", buf1);
			if (*p1 == NULLCHAR) {
				pad_line(fp, wrap, ' ');
				fprintf(fp, "%s\n", p1+1);
			}
		}

		/* Guard against a self-referential child pointer. */
		if (child && (child != entry))
			do_resource_list(child, resource_buf, size);

		entry = sibling;
	}
}

/*
 * PCI defines taken from 2.2.17 version of pci.h
 */
#define USE_2_2_17_PCI_H
#ifdef USE_2_2_17_PCI_H
/*
 * PCI defines and function prototypes
 * Copyright 1994, Drew Eckhardt
 * Copyright 1997--1999 Martin Mares
 *
 * For more information, please consult the following manuals (look at
 * http://www.pcisig.com/ for how to get them):
 *
 * PCI BIOS Specification
 * PCI Local Bus Specification
 * PCI to PCI Bridge Specification
 * PCI System Design Guide
 */
/*
 * Under PCI, each device has 256 bytes of configuration address space,
 * of which the first 64 bytes are standardized as follows:
 */
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
#define PCI_STATUS 0x06 /* 16 bits */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */
#define PCI_STATUS_UDF 0x40 /* Support User Definable Features */
#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
#define PCI_STATUS_DEVSEL_FAST 0x000
#define PCI_STATUS_DEVSEL_MEDIUM 0x200
#define PCI_STATUS_DEVSEL_SLOW 0x400
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
#define
PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ #define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ #define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ #define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ #define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ #define PCI_REVISION_ID 0x08 /* Revision ID */ #define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ #define PCI_CLASS_DEVICE 0x0a /* Device class */ #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ #define PCI_LATENCY_TIMER 0x0d /* 8 bits */ #define PCI_HEADER_TYPE 0x0e /* 8 bits */ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ #define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ #define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ /* * Base addresses specify locations in memory or I/O space. * Decoded size can be determined by writing a value of * 0xffffffff to the register, and reading it back. Only * 1 bits are decoded. */ #define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ #define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ #define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ #define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ #define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ #define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ #define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ #define PCI_BASE_ADDRESS_SPACE_IO 0x01 #define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 #define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 #define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ #define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */ #define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ #define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? 
*/ #define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) #define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) /* bit 1 is reserved if address_space = 1 */ /* Header type 0 (normal devices) */ #define PCI_CARDBUS_CIS 0x28 #define PCI_SUBSYSTEM_VENDOR_ID 0x2c #define PCI_SUBSYSTEM_ID 0x2e #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ #define PCI_ROM_ADDRESS_ENABLE 0x01 #define PCI_ROM_ADDRESS_MASK (~0x7ffUL) #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ /* 0x35-0x3b are reserved */ #define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ #define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ #define PCI_MIN_GNT 0x3e /* 8 bits */ #define PCI_MAX_LAT 0x3f /* 8 bits */ /* Header type 1 (PCI-to-PCI bridges) */ #define PCI_PRIMARY_BUS 0x18 /* Primary bus number */ #define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */ #define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */ #define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */ #define PCI_IO_BASE 0x1c /* I/O range behind the bridge */ #define PCI_IO_LIMIT 0x1d #define PCI_IO_RANGE_TYPE_MASK 0x0f /* I/O bridging type */ #define PCI_IO_RANGE_TYPE_16 0x00 #define PCI_IO_RANGE_TYPE_32 0x01 #define PCI_IO_RANGE_MASK ~0x0f #define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */ #define PCI_MEMORY_BASE 0x20 /* Memory range behind */ #define PCI_MEMORY_LIMIT 0x22 #define PCI_MEMORY_RANGE_TYPE_MASK 0x0f #define PCI_MEMORY_RANGE_MASK ~0x0f #define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */ #define PCI_PREF_MEMORY_LIMIT 0x26 #define PCI_PREF_RANGE_TYPE_MASK 0x0f #define PCI_PREF_RANGE_TYPE_32 0x00 #define PCI_PREF_RANGE_TYPE_64 0x01 #define PCI_PREF_RANGE_MASK ~0x0f #define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */ #define PCI_PREF_LIMIT_UPPER32 0x2c #define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */ #define PCI_IO_LIMIT_UPPER16 0x32 /* 0x34-0x3b is reserved */ #define 
PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ /* 0x3c-0x3d are same as for htype 0 */ #define PCI_BRIDGE_CONTROL 0x3e #define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */ #define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */ #define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */ #define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */ #define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */ #define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */ #define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */ /* Header type 2 (CardBus bridges) */ /* 0x14-0x15 reserved */ #define PCI_CB_SEC_STATUS 0x16 /* Secondary status */ #define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */ #define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */ #define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */ #define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */ #define PCI_CB_MEMORY_BASE_0 0x1c #define PCI_CB_MEMORY_LIMIT_0 0x20 #define PCI_CB_MEMORY_BASE_1 0x24 #define PCI_CB_MEMORY_LIMIT_1 0x28 #define PCI_CB_IO_BASE_0 0x2c #define PCI_CB_IO_BASE_0_HI 0x2e #define PCI_CB_IO_LIMIT_0 0x30 #define PCI_CB_IO_LIMIT_0_HI 0x32 #define PCI_CB_IO_BASE_1 0x34 #define PCI_CB_IO_BASE_1_HI 0x36 #define PCI_CB_IO_LIMIT_1 0x38 #define PCI_CB_IO_LIMIT_1_HI 0x3a #define PCI_CB_IO_RANGE_MASK ~0x03 /* 0x3c-0x3d are same as for htype 0 */ #define PCI_CB_BRIDGE_CONTROL 0x3e #define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */ #define PCI_CB_BRIDGE_CTL_SERR 0x02 #define PCI_CB_BRIDGE_CTL_ISA 0x04 #define PCI_CB_BRIDGE_CTL_VGA 0x08 #define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20 #define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */ #define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */ #define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */ #define 
PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200 #define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400 #define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 #define PCI_CB_SUBSYSTEM_ID 0x42 #define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */ /* 0x48-0x7f reserved */ /* Capability lists */ #define PCI_CAP_LIST_ID 0 /* Capability ID */ #define PCI_CAP_ID_PM 0x01 /* Power Management */ #define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ /* Device classes and subclasses */ #define PCI_CLASS_NOT_DEFINED 0x0000 #define PCI_CLASS_NOT_DEFINED_VGA 0x0001 #define PCI_BASE_CLASS_STORAGE 0x01 #define PCI_CLASS_STORAGE_SCSI 0x0100 #define PCI_CLASS_STORAGE_IDE 0x0101 #define PCI_CLASS_STORAGE_FLOPPY 0x0102 #define PCI_CLASS_STORAGE_IPI 0x0103 #define PCI_CLASS_STORAGE_RAID 0x0104 #define PCI_CLASS_STORAGE_OTHER 0x0180 #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 #define PCI_CLASS_NETWORK_FDDI 0x0202 #define PCI_CLASS_NETWORK_ATM 0x0203 #define PCI_CLASS_NETWORK_OTHER 0x0280 #define PCI_BASE_CLASS_DISPLAY 0x03 #define PCI_CLASS_DISPLAY_VGA 0x0300 #define PCI_CLASS_DISPLAY_XGA 0x0301 #define PCI_CLASS_DISPLAY_OTHER 0x0380 #define PCI_BASE_CLASS_MULTIMEDIA 0x04 #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 #define PCI_CLASS_BRIDGE_HOST 0x0600 #define PCI_CLASS_BRIDGE_ISA 0x0601 #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 #define PCI_CLASS_BRIDGE_OTHER 0x0680 #define PCI_BASE_CLASS_COMMUNICATION 0x07 
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 #define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 #define PCI_CLASS_COMMUNICATION_OTHER 0x0780 #define PCI_BASE_CLASS_SYSTEM 0x08 #define PCI_CLASS_SYSTEM_PIC 0x0800 #define PCI_CLASS_SYSTEM_DMA 0x0801 #define PCI_CLASS_SYSTEM_TIMER 0x0802 #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 #define PCI_CLASS_INPUT_KEYBOARD 0x0900 #define PCI_CLASS_INPUT_PEN 0x0901 #define PCI_CLASS_INPUT_MOUSE 0x0902 #define PCI_CLASS_INPUT_OTHER 0x0980 #define PCI_BASE_CLASS_DOCKING 0x0a #define PCI_CLASS_DOCKING_GENERIC 0x0a00 #define PCI_CLASS_DOCKING_OTHER 0x0a01 #define PCI_BASE_CLASS_PROCESSOR 0x0b #define PCI_CLASS_PROCESSOR_386 0x0b00 #define PCI_CLASS_PROCESSOR_486 0x0b01 #define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 #define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 #define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 #define PCI_CLASS_PROCESSOR_CO 0x0b40 #define PCI_BASE_CLASS_SERIAL 0x0c #define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 #define PCI_CLASS_SERIAL_ACCESS 0x0c01 #define PCI_CLASS_SERIAL_SSA 0x0c02 #define PCI_CLASS_SERIAL_USB 0x0c03 #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 #define PCI_BASE_CLASS_INTELLIGENT 0x0e #define PCI_CLASS_INTELLIGENT_I2O 0x0e00 #define PCI_CLASS_HOT_SWAP_CONTROLLER 0xff00 #define PCI_CLASS_OTHERS 0xff /* * Vendor and card ID's: sort these numerically according to vendor * (and according to card ID within vendor). Send all updates to * . 
*/ #define PCI_VENDOR_ID_COMPAQ 0x0e11 #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 #define PCI_DEVICE_ID_COMPAQ_1280 0x3033 #define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000 #define PCI_DEVICE_ID_COMPAQ_6010 0x6010 #define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 #define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 #define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 #define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 #define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 #define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 #define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 #define PCI_VENDOR_ID_NCR 0x1000 #define PCI_DEVICE_ID_NCR_53C810 0x0001 #define PCI_DEVICE_ID_NCR_53C820 0x0002 #define PCI_DEVICE_ID_NCR_53C825 0x0003 #define PCI_DEVICE_ID_NCR_53C815 0x0004 #define PCI_DEVICE_ID_NCR_53C860 0x0006 #define PCI_DEVICE_ID_NCR_53C1510D 0x000a #define PCI_DEVICE_ID_NCR_53C896 0x000b #define PCI_DEVICE_ID_NCR_53C895 0x000c #define PCI_DEVICE_ID_NCR_53C885 0x000d #define PCI_DEVICE_ID_NCR_53C875 0x000f #define PCI_DEVICE_ID_NCR_53C1510 0x0010 #define PCI_DEVICE_ID_NCR_53C875J 0x008f #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_DEVICE_ID_ATI_68800 0x4158 #define PCI_DEVICE_ID_ATI_215CT222 0x4354 #define PCI_DEVICE_ID_ATI_210888CX 0x4358 #define PCI_DEVICE_ID_ATI_215GB 0x4742 #define PCI_DEVICE_ID_ATI_215GD 0x4744 #define PCI_DEVICE_ID_ATI_215GI 0x4749 #define PCI_DEVICE_ID_ATI_215GP 0x4750 #define PCI_DEVICE_ID_ATI_215GQ 0x4751 #define PCI_DEVICE_ID_ATI_215GT 0x4754 #define PCI_DEVICE_ID_ATI_215GTB 0x4755 #define PCI_DEVICE_ID_ATI_210888GX 0x4758 #define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 #define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 #define PCI_DEVICE_ID_ATI_215LG 0x4c47 #define PCI_DEVICE_ID_ATI_264LT 0x4c54 #define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 #define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 #define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 #define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 #define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b 
#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c #define PCI_DEVICE_ID_ATI_264VT 0x5654 #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 #define PCI_DEVICE_ID_VLSI_82C593 0x0006 #define PCI_DEVICE_ID_VLSI_82C594 0x0007 #define PCI_DEVICE_ID_VLSI_82C597 0x0009 #define PCI_DEVICE_ID_VLSI_82C541 0x000c #define PCI_DEVICE_ID_VLSI_82C543 0x000d #define PCI_DEVICE_ID_VLSI_82C532 0x0101 #define PCI_DEVICE_ID_VLSI_82C534 0x0102 #define PCI_DEVICE_ID_VLSI_82C535 0x0104 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 #define PCI_VENDOR_ID_ADL 0x1005 #define PCI_DEVICE_ID_ADL_2301 0x2301 #define PCI_VENDOR_ID_NS 0x100b #define PCI_DEVICE_ID_NS_87415 0x0002 #define PCI_DEVICE_ID_NS_87410 0xd001 #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 #define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 #define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 #define PCI_DEVICE_ID_TSENG_ET6000 0x3208 #define PCI_VENDOR_ID_WEITEK 0x100e #define PCI_DEVICE_ID_WEITEK_P9000 0x9001 #define PCI_DEVICE_ID_WEITEK_P9100 0x9100 #define PCI_VENDOR_ID_DEC 0x1011 #define PCI_DEVICE_ID_DEC_BRD 0x0001 #define PCI_DEVICE_ID_DEC_TULIP 0x0002 #define PCI_DEVICE_ID_DEC_TGA 0x0004 #define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 #define PCI_DEVICE_ID_DEC_TGA2 0x000D #define PCI_DEVICE_ID_DEC_FDDI 0x000F #define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 #define PCI_DEVICE_ID_DEC_21142 0x0019 #define PCI_DEVICE_ID_DEC_21052 0x0021 #define PCI_DEVICE_ID_DEC_21150 0x0022 #define PCI_DEVICE_ID_DEC_21152 0x0024 #define PCI_DEVICE_ID_DEC_21153 0x0025 #define PCI_DEVICE_ID_DEC_21154 0x0026 #define PCI_DEVICE_ID_DEC_21285 0x1065 #define PCI_DEVICE_ID_DEC_21554 0x0046 #define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 #define PCI_VENDOR_ID_CIRRUS 0x1013 #define PCI_DEVICE_ID_CIRRUS_7548 0x0038 #define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 #define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 #define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 #define 
PCI_DEVICE_ID_CIRRUS_5436 0x00ac #define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 #define PCI_DEVICE_ID_CIRRUS_5480 0x00bc #define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 #define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 #define PCI_DEVICE_ID_CIRRUS_6729 0x1100 #define PCI_DEVICE_ID_CIRRUS_6832 0x1110 #define PCI_DEVICE_ID_CIRRUS_7542 0x1200 #define PCI_DEVICE_ID_CIRRUS_7543 0x1202 #define PCI_DEVICE_ID_CIRRUS_7541 0x1204 #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a #define PCI_DEVICE_ID_IBM_TR 0x0018 #define PCI_DEVICE_ID_IBM_82G2675 0x001d #define PCI_DEVICE_ID_IBM_MCA 0x0020 #define PCI_DEVICE_ID_IBM_82351 0x0022 #define PCI_DEVICE_ID_IBM_PYTHON 0x002d #define PCI_DEVICE_ID_IBM_SERVERAID 0x002e #define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e #define PCI_DEVICE_ID_IBM_MPIC 0x0046 #define PCI_DEVICE_ID_IBM_3780IDSP 0x007d #define PCI_DEVICE_ID_IBM_MPIC_2 0xffff #define PCI_VENDOR_ID_WD 0x101c #define PCI_DEVICE_ID_WD_7197 0x3296 #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 #define PCI_VENDOR_ID_TRIDENT 0x1023 #define PCI_DEVICE_ID_TRIDENT_9397 0x9397 #define PCI_DEVICE_ID_TRIDENT_9420 0x9420 #define PCI_DEVICE_ID_TRIDENT_9440 0x9440 #define PCI_DEVICE_ID_TRIDENT_9660 0x9660 #define PCI_DEVICE_ID_TRIDENT_9750 0x9750 #define PCI_VENDOR_ID_AI 0x1025 #define PCI_DEVICE_ID_AI_M1435 0x1435 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 #define PCI_DEVICE_ID_MATROX_MYS 0x051A #define PCI_DEVICE_ID_MATROX_MIL_2 0x051b #define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f #define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 #define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 #define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 #define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 #define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 #define PCI_VENDOR_ID_CT 0x102c #define PCI_DEVICE_ID_CT_65545 0x00d8 #define PCI_DEVICE_ID_CT_65548 0x00dc 
#define PCI_DEVICE_ID_CT_65550 0x00e0 #define PCI_DEVICE_ID_CT_65554 0x00e4 #define PCI_DEVICE_ID_CT_65555 0x00e5 #define PCI_VENDOR_ID_MIRO 0x1031 #define PCI_DEVICE_ID_MIRO_36050 0x5601 #define PCI_VENDOR_ID_NEC 0x1033 #define PCI_DEVICE_ID_NEC_PCX2 0x0046 #define PCI_VENDOR_ID_FD 0x1036 #define PCI_DEVICE_ID_FD_36C70 0x0000 #define PCI_VENDOR_ID_SI 0x1039 #define PCI_DEVICE_ID_SI_5591_AGP 0x0001 #define PCI_DEVICE_ID_SI_6202 0x0002 #define PCI_DEVICE_ID_SI_503 0x0008 #define PCI_DEVICE_ID_SI_ACPI 0x0009 #define PCI_DEVICE_ID_SI_5597_VGA 0x0200 #define PCI_DEVICE_ID_SI_6205 0x0205 #define PCI_DEVICE_ID_SI_501 0x0406 #define PCI_DEVICE_ID_SI_496 0x0496 #define PCI_DEVICE_ID_SI_601 0x0601 #define PCI_DEVICE_ID_SI_5107 0x5107 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 #define PCI_DEVICE_ID_SI_5571 0x5571 #define PCI_DEVICE_ID_SI_5591 0x5591 #define PCI_DEVICE_ID_SI_5597 0x5597 #define PCI_DEVICE_ID_SI_7001 0x7001 #define PCI_VENDOR_ID_HP 0x103c #define PCI_DEVICE_ID_HP_J2585A 0x1030 #define PCI_DEVICE_ID_HP_J2585B 0x1031 #define PCI_VENDOR_ID_PCTECH 0x1042 #define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 #define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 #define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000 #define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010 #define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 #define PCI_VENDOR_ID_DPT 0x1044 #define PCI_DEVICE_ID_DPT 0xa400 #define PCI_VENDOR_ID_OPTI 0x1045 #define PCI_DEVICE_ID_OPTI_92C178 0xc178 #define PCI_DEVICE_ID_OPTI_82C557 0xc557 #define PCI_DEVICE_ID_OPTI_82C558 0xc558 #define PCI_DEVICE_ID_OPTI_82C621 0xc621 #define PCI_DEVICE_ID_OPTI_82C700 0xc700 #define PCI_DEVICE_ID_OPTI_82C701 0xc701 #define PCI_DEVICE_ID_OPTI_82C814 0xc814 #define PCI_DEVICE_ID_OPTI_82C822 0xc822 #define PCI_DEVICE_ID_OPTI_82C861 0xc861 #define PCI_DEVICE_ID_OPTI_82C825 0xd568 #define PCI_VENDOR_ID_SGS 0x104a #define PCI_DEVICE_ID_SGS_2000 0x0008 #define PCI_DEVICE_ID_SGS_1764 0x0009 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define 
PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4010 0x3d04 #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_PCI1130 0xac12 #define PCI_DEVICE_ID_TI_PCI1031 0xac13 #define PCI_DEVICE_ID_TI_PCI1131 0xac15 #define PCI_DEVICE_ID_TI_PCI1250 0xac16 #define PCI_DEVICE_ID_TI_PCI1220 0xac17 #define PCI_VENDOR_ID_OAK 0x104e #define PCI_DEVICE_ID_OAK_OTI107 0x0107 /* Winbond have two vendor IDs! See 0x10ad as well */ #define PCI_VENDOR_ID_WINBOND2 0x1050 #define PCI_DEVICE_ID_WINBOND2_89C940 0x0940 #define PCI_VENDOR_ID_MOTOROLA 0x1057 #define PCI_VENDOR_ID_MOTOROLA_OOPS 0x1507 #define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 #define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 #define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 #define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 #define PCI_DEVICE_ID_MOTOROLA_CPX8216 0x4806 #define PCI_VENDOR_ID_PROMISE 0x105a #define PCI_DEVICE_ID_PROMISE_20246 0x4d33 #define PCI_DEVICE_ID_PROMISE_5300 0x5300 #define PCI_VENDOR_ID_N9 0x105d #define PCI_DEVICE_ID_N9_I128 0x2309 #define PCI_DEVICE_ID_N9_I128_2 0x2339 #define PCI_DEVICE_ID_N9_I128_T2R 0x493d #define PCI_VENDOR_ID_UMC 0x1060 #define PCI_DEVICE_ID_UMC_UM8673F 0x0101 #define PCI_DEVICE_ID_UMC_UM8891A 0x0891 #define PCI_DEVICE_ID_UMC_UM8886BF 0x673a #define PCI_DEVICE_ID_UMC_UM8886A 0x886a #define PCI_DEVICE_ID_UMC_UM8881F 0x8881 #define PCI_DEVICE_ID_UMC_UM8886F 0x8886 #define PCI_DEVICE_ID_UMC_UM9017F 0x9017 #define PCI_DEVICE_ID_UMC_UM8886N 0xe886 #define PCI_DEVICE_ID_UMC_UM8891N 0xe891 #define PCI_VENDOR_ID_X 0x1061 #define PCI_DEVICE_ID_X_AGX016 0x0001 #define PCI_VENDOR_ID_PICOP 0x1066 #define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001 #define PCI_DEVICE_ID_PICOP_PT80C524 0x8002 #define PCI_VENDOR_ID_MYLEX 0x1069 #define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 #define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 #define PCI_DEVICE_ID_MYLEX_DAC960_PG 
0x0010 #define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 #define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 #define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 #define PCI_DEVICE_ID_APPLE_GC 0x0002 #define PCI_DEVICE_ID_APPLE_HYDRA 0x000e #define PCI_VENDOR_ID_NEXGEN 0x1074 #define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78 #define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 #define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_VENDOR_ID_CYRIX 0x1078 #define PCI_DEVICE_ID_CYRIX_5510 0x0000 #define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 #define PCI_DEVICE_ID_CYRIX_5520 0x0002 #define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 #define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101 #define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 #define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 #define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 #define PCI_VENDOR_ID_LEADTEK 0x107d #define PCI_DEVICE_ID_LEADTEK_805 0x0000 #define PCI_VENDOR_ID_CONTAQ 0x1080 #define PCI_DEVICE_ID_CONTAQ_82C599 0x0600 #define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 #define PCI_VENDOR_ID_FOREX 0x1083 #define PCI_VENDOR_ID_OLICOM 0x108d #define PCI_DEVICE_ID_OLICOM_OC3136 0x0001 #define PCI_DEVICE_ID_OLICOM_OC2315 0x0011 #define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 #define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #define PCI_DEVICE_ID_OLICOM_OC6151 0x0021 #define PCI_VENDOR_ID_SUN 0x108e #define PCI_DEVICE_ID_SUN_EBUS 0x1000 #define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 #define PCI_DEVICE_ID_SUN_SIMBA 0x5000 #define PCI_DEVICE_ID_SUN_PBM 0x8000 #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_640 0x0640 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 #define PCI_DEVICE_ID_CMD_647 0x0647 #define PCI_DEVICE_ID_CMD_670 0x0670 #define PCI_VENDOR_ID_VISION 0x1098 #define 
PCI_DEVICE_ID_VISION_QD8500 0x0001 #define PCI_DEVICE_ID_VISION_QD8580 0x0002 #define PCI_VENDOR_ID_BROOKTREE 0x109e #define PCI_DEVICE_ID_BROOKTREE_848 0x0350 #define PCI_DEVICE_ID_BROOKTREE_849A 0x0351 #define PCI_DEVICE_ID_BROOKTREE_878_1 0x036e #define PCI_DEVICE_ID_BROOKTREE_878 0x0878 #define PCI_DEVICE_ID_BROOKTREE_8474 0x8474 #define PCI_VENDOR_ID_SIERRA 0x10a8 #define PCI_DEVICE_ID_SIERRA_STB 0x0000 #define PCI_VENDOR_ID_ACC 0x10aa #define PCI_DEVICE_ID_ACC_2056 0x0000 #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_83769 0x0001 #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 #define PCI_DEVICE_ID_WINBOND_83C553 0x0565 #define PCI_VENDOR_ID_DATABOOK 0x10b3 #define PCI_DEVICE_ID_DATABOOK_87144 0xb106 #define PCI_VENDOR_ID_PLX 0x10b5 #define PCI_DEVICE_ID_PLX_9050 0x9050 #define PCI_DEVICE_ID_PLX_9060 0x9060 #define PCI_DEVICE_ID_PLX_9060ES 0x906E #define PCI_DEVICE_ID_PLX_9060SD 0x906D #define PCI_DEVICE_ID_PLX_9080 0x9080 #define PCI_VENDOR_ID_MADGE 0x10b6 #define PCI_DEVICE_ID_MADGE_MK2 0x0002 #define PCI_DEVICE_ID_MADGE_C155S 0x1001 #define PCI_VENDOR_ID_3COM 0x10b7 #define PCI_DEVICE_ID_3COM_3C985 0x0001 #define PCI_DEVICE_ID_3COM_3C339 0x3390 #define PCI_DEVICE_ID_3COM_3C590 0x5900 #define PCI_DEVICE_ID_3COM_3C595TX 0x5950 #define PCI_DEVICE_ID_3COM_3C595T4 0x5951 #define PCI_DEVICE_ID_3COM_3C595MII 0x5952 #define PCI_DEVICE_ID_3COM_3C900TPO 0x9000 #define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001 #define PCI_DEVICE_ID_3COM_3C905TX 0x9050 #define PCI_DEVICE_ID_3COM_3C905T4 0x9051 #define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055 #define PCI_VENDOR_ID_SMC 0x10b8 #define PCI_DEVICE_ID_SMC_EPIC100 0x0005 #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1445 0x1445 #define PCI_DEVICE_ID_AL_M1449 0x1449 #define PCI_DEVICE_ID_AL_M1451 0x1451 #define PCI_DEVICE_ID_AL_M1461 0x1461 #define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1511 0x1511 #define PCI_DEVICE_ID_AL_M1513 0x1513 #define PCI_DEVICE_ID_AL_M1521 0x1521 #define 
PCI_DEVICE_ID_AL_M1523 0x1523 #define PCI_DEVICE_ID_AL_M1531 0x1531 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M3307 0x3307 #define PCI_DEVICE_ID_AL_M4803 0x5215 #define PCI_DEVICE_ID_AL_M5219 0x5219 #define PCI_DEVICE_ID_AL_M5229 0x5229 #define PCI_DEVICE_ID_AL_M5237 0x5237 #define PCI_DEVICE_ID_AL_M7101 0x7101 #define PCI_VENDOR_ID_MITSUBISHI 0x10ba #define PCI_VENDOR_ID_SURECOM 0x10bd #define PCI_DEVICE_ID_SURECOM_NE34 0x0e34 #define PCI_VENDOR_ID_NEOMAGIC 0x10c8 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004 #define PCI_DEVICE_ID_NEOMAGIC_MAGICMEDIA_256AV 0x0005 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZVPLUS 0x0083 #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_VENDOR_ID_MACRONIX 0x10d9 #define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512 #define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531 #define PCI_VENDOR_ID_CERN 0x10dc #define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001 #define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002 #define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021 #define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022 #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_8849 0x8849 #define PCI_VENDOR_ID_TEKRAM2 0x10e1 #define PCI_DEVICE_ID_TEKRAM2_690c 0x690c #define PCI_VENDOR_ID_TUNDRA 0x10e3 #define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000 #define PCI_VENDOR_ID_AMCC 0x10e8 #define PCI_DEVICE_ID_AMCC_MYRINET 0x8043 #define PCI_DEVICE_ID_AMCC_PARASTATION 0x8062 #define PCI_DEVICE_ID_AMCC_S5933 0x807d #define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1680 0x1680 #define PCI_DEVICE_ID_INTERG_1682 0x1682 #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_8029 0x8029 #define 
PCI_DEVICE_ID_REALTEK_8129 0x8129 #define PCI_DEVICE_ID_REALTEK_8139 0x8139 #define PCI_VENDOR_ID_TRUEVISION 0x10fa #define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c #define PCI_VENDOR_ID_INIT 0x1101 #define PCI_DEVICE_ID_INIT_320P 0x9100 #define PCI_DEVICE_ID_INIT_360P 0x9500 #define PCI_VENDOR_ID_TTI 0x1103 #define PCI_DEVICE_ID_TTI_HPT343 0x0003 #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_82C505 0x0505 #define PCI_DEVICE_ID_VIA_82C561 0x0561 #define PCI_DEVICE_ID_VIA_82C586_1 0x0571 #define PCI_DEVICE_ID_VIA_82C576 0x0576 #define PCI_DEVICE_ID_VIA_82C585 0x0585 #define PCI_DEVICE_ID_VIA_82C586_0 0x0586 #define PCI_DEVICE_ID_VIA_82C595 0x0595 #define PCI_DEVICE_ID_VIA_82C596_0 0x0596 #define PCI_DEVICE_ID_VIA_82C597_0 0x0597 #define PCI_DEVICE_ID_VIA_82C598_0 0x0598 #define PCI_DEVICE_ID_VIA_82C926 0x0926 #define PCI_DEVICE_ID_VIA_82C416 0x1571 #define PCI_DEVICE_ID_VIA_82C595_97 0x1595 #define PCI_DEVICE_ID_VIA_82C586_2 0x3038 #define PCI_DEVICE_ID_VIA_82C586_3 0x3040 #define PCI_DEVICE_ID_VIA_82C686_5 0x3058 #define PCI_DEVICE_ID_VIA_86C100A 0x6100 #define PCI_DEVICE_ID_VIA_82C597_1 0x8597 #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_VENDOR_ID_SMC2 0x1113 #define PCI_DEVICE_ID_SMC2_1211TX 0x1211 #define PCI_VENDOR_ID_VORTEX 0x1119 #define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 #define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 #define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 #define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 #define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 #define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 #define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 #define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 #define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 #define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 #define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a #define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b #define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c #define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d #define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 #define 
PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 #define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 #define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111 #define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112 #define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115 #define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121 #define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122 #define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125 #define PCI_VENDOR_ID_EF 0x111a #define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 #define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 #define PCI_VENDOR_ID_FORE 0x1127 #define PCI_DEVICE_ID_FORE_PCA200PC 0x0210 #define PCI_DEVICE_ID_FORE_PCA200E 0x0300 #define PCI_VENDOR_ID_IMAGINGTECH 0x112f #define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000 #define PCI_VENDOR_ID_PHILIPS 0x1131 #define PCI_DEVICE_ID_PHILIPS_SAA7145 0x7145 #define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 #define PCI_VENDOR_ID_CYCLONE 0x113c #define PCI_DEVICE_ID_CYCLONE_SDK 0x0001 #define PCI_VENDOR_ID_ALLIANCE 0x1142 #define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210 #define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422 #define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424 #define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d #define PCI_VENDOR_ID_SYSKONNECT 0x1148 #define PCI_DEVICE_ID_SYSKONNECT_FP 0x4000 #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 #define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 #define PCI_VENDOR_ID_VMIC 0x114a #define PCI_DEVICE_ID_VMIC_VME 0x7587 #define PCI_VENDOR_ID_DIGI 0x114f #define PCI_DEVICE_ID_DIGI_EPC 0x0002 #define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003 #define PCI_DEVICE_ID_DIGI_XEM 0x0004 #define PCI_DEVICE_ID_DIGI_XR 0x0005 #define PCI_DEVICE_ID_DIGI_CX 0x0006 #define PCI_DEVICE_ID_DIGI_XRJ 0x0009 
#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a #define PCI_DEVICE_ID_DIGI_XR_920 0x0027 #define PCI_VENDOR_ID_MUTECH 0x1159 #define PCI_DEVICE_ID_MUTECH_MV1000 0x0001 #define PCI_VENDOR_ID_RENDITION 0x1163 #define PCI_DEVICE_ID_RENDITION_VERITE 0x0001 #define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 #define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 #define PCI_DEVICE_ID_SERVERWORKS_CIOB30 0x0010 #define PCI_DEVICE_ID_SERVERWORKS_CMIC_HE 0x0011 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_601 0x0601 #define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a #define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f #define PCI_VENDOR_ID_RICOH 0x1180 #define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 #define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_VENDOR_ID_ARTOP 0x1191 #define PCI_DEVICE_ID_ARTOP_ATP8400 0x0004 #define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 #define PCI_VENDOR_ID_ZEITNET 0x1193 #define PCI_DEVICE_ID_ZEITNET_1221 0x0001 #define PCI_DEVICE_ID_ZEITNET_1225 0x0002 #define PCI_VENDOR_ID_OMEGA 0x119b #define PCI_DEVICE_ID_OMEGA_82C092G 0x1221 #define PCI_VENDOR_ID_GALILEO 0x11ab #define PCI_DEVICE_ID_GALILEO_GT64011 0x4146 #define PCI_VENDOR_ID_LITEON 0x11ad #define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002 #define PCI_VENDOR_ID_NP 0x11bc #define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001 #define PCI_VENDOR_ID_ATT 0x11c1 #define PCI_DEVICE_ID_ATT_L56XMF 0x0440 #define PCI_DEVICE_ID_ATT_L56DVP 0x0480 #define PCI_VENDOR_ID_SPECIALIX 0x11cb #define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 #define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000 #define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 #define PCI_VENDOR_ID_AURAVISION 0x11d1 #define 
PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7 #define PCI_VENDOR_ID_IKON 0x11d5 #define PCI_DEVICE_ID_IKON_10115 0x0115 #define PCI_DEVICE_ID_IKON_10117 0x0117 #define PCI_VENDOR_ID_ZORAN 0x11de #define PCI_DEVICE_ID_ZORAN_36057 0x6057 #define PCI_DEVICE_ID_ZORAN_36120 0x6120 #define PCI_VENDOR_ID_KINETIC 0x11f4 #define PCI_DEVICE_ID_KINETIC_2915 0x2915 #define PCI_VENDOR_ID_COMPEX 0x11f6 #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 #define PCI_DEVICE_ID_COMPEX_RL2000 0x1401 #define PCI_VENDOR_ID_RP 0x11fe #define PCI_DEVICE_ID_RP32INTF 0x0001 #define PCI_DEVICE_ID_RP8INTF 0x0002 #define PCI_DEVICE_ID_RP16INTF 0x0003 #define PCI_DEVICE_ID_RP4QUAD 0x0004 #define PCI_DEVICE_ID_RP8OCTA 0x0005 #define PCI_DEVICE_ID_RP8J 0x0006 #define PCI_DEVICE_ID_RPP4 0x000A #define PCI_DEVICE_ID_RPP8 0x000B #define PCI_DEVICE_ID_RP8M 0x000C #define PCI_VENDOR_ID_CYCLADES 0x120e #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 #define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 #define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 #define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 #define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 #define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 #define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 #define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 #define PCI_DEVICE_ID_PC300_TE_1 0x0311 #define PCI_VENDOR_ID_ESSENTIAL 0x120f #define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 #define PCI_VENDOR_ID_O2 0x1217 #define PCI_DEVICE_ID_O2_6729 0x6729 #define PCI_DEVICE_ID_O2_6730 0x673a #define PCI_DEVICE_ID_O2_6832 0x6832 #define PCI_DEVICE_ID_O2_6836 0x6836 #define PCI_VENDOR_ID_3DFX 0x121a #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 #define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 #define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 #define PCI_VENDOR_ID_SIGMADES 0x1236 #define PCI_DEVICE_ID_SIGMADES_6425 0x6401 #define PCI_VENDOR_ID_CCUBE 0x123f #define PCI_VENDOR_ID_AVM 0x1244 #define PCI_DEVICE_ID_AVM_A1 0x0a00 #define 
PCI_VENDOR_ID_DIPIX 0x1246 #define PCI_VENDOR_ID_STALLION 0x124d #define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000 #define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002 #define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003 #define PCI_VENDOR_ID_OPTIBASE 0x1255 #define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110 #define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210 #define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110 #define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120 #define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130 #define PCI_VENDOR_ID_SATSAGEM 0x1267 #define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352 #define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b #define PCI_VENDOR_ID_HUGHES 0x1273 #define PCI_DEVICE_ID_HUGHES_DIRECPC 0x0002 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 #define PCI_VENDOR_ID_ALTEON 0x12ae #define PCI_DEVICE_ID_ALTEON_ACENIC 0x0001 #define PCI_VENDOR_ID_PICTUREL 0x12c5 #define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081 #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 #define PCI_VENDOR_ID_CBOARDS 0x1307 #define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001 #define PCI_VENDOR_ID_SIIG 0x131f #define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 #define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 #define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 #define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 #define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 #define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 #define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 #define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 #define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 #define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 #define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 #define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 #define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 #define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 #define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 #define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 
#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 #define PCI_VENDOR_ID_NETGEAR 0x1385 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a #define PCI_VENDOR_ID_LAVA 0x1407 #define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 #define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ #define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ #define PCI_VENDOR_ID_TIMEDIA 0x1409 #define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 #define PCI_DEVICE_ID_TIMEDIA_4008A 0x7268 #define PCI_VENDOR_ID_AFAVLAB 0x14db #define PCI_DEVICE_ID_AFAVLAB_TK9902 0x2120 #define PCI_VENDOR_ID_SYMPHONY 0x1c1c #define PCI_DEVICE_ID_SYMPHONY_101 0x0001 #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 #define PCI_VENDOR_ID_3DLABS 0x3d3d #define PCI_DEVICE_ID_3DLABS_300SX 0x0001 #define PCI_DEVICE_ID_3DLABS_500TX 0x0002 #define PCI_DEVICE_ID_3DLABS_DELTA 0x0003 #define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004 #define PCI_DEVICE_ID_3DLABS_MX 0x0006 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_GAMMA 0x0008 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 #define PCI_VENDOR_ID_AVANCE 0x4005 #define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064 #define PCI_DEVICE_ID_AVANCE_2302 0x2302 #define PCI_VENDOR_ID_NETVIN 0x4a14 #define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000 #define PCI_VENDOR_ID_S3 0x5333 #define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551 #define PCI_DEVICE_ID_S3_ViRGE 0x5631 #define PCI_DEVICE_ID_S3_TRIO 0x8811 #define PCI_DEVICE_ID_S3_AURORA64VP 0x8812 #define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814 #define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d #define PCI_DEVICE_ID_S3_868 0x8880 #define PCI_DEVICE_ID_S3_928 0x88b0 #define PCI_DEVICE_ID_S3_864_1 0x88c0 #define PCI_DEVICE_ID_S3_864_2 0x88c1 #define PCI_DEVICE_ID_S3_964_1 0x88d0 #define PCI_DEVICE_ID_S3_964_2 0x88d1 #define PCI_DEVICE_ID_S3_968 0x88f0 #define PCI_DEVICE_ID_S3_TRIO64V2 0x8901 #define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902 #define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01 #define PCI_DEVICE_ID_S3_ViRGE_GX2 
0x8a10 #define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01 #define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02 #define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03 #define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 #define PCI_VENDOR_ID_DCI 0x6666 #define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 #define PCI_VENDOR_ID_GENROCO 0x5555 #define PCI_DEVICE_ID_GENROCO_HFP832 0x0003 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_21145 0x0039 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 #define PCI_DEVICE_ID_INTEL_82430 0x0486 #define PCI_DEVICE_ID_INTEL_82434 0x04a3 #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RN 0x0964 #define PCI_DEVICE_ID_INTEL_82559ER 0x1209 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222 #define PCI_DEVICE_ID_INTEL_7116 0x1223 #define PCI_DEVICE_ID_INTEL_82596 0x1226 #define PCI_DEVICE_ID_INTEL_82865 0x1227 #define PCI_DEVICE_ID_INTEL_82557 0x1229 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e #define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 #define PCI_DEVICE_ID_INTEL_82371MX 0x1234 #define PCI_DEVICE_ID_INTEL_82437MX 0x1235 #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 #define PCI_DEVICE_ID_INTEL_MEGARAID 0x1960 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 #define PCI_DEVICE_ID_INTEL_82437VX 0x7030 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 #define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 #define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 
#define PCI_DEVICE_ID_INTEL_P6 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82453GX 0x84c5 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca #define PCI_DEVICE_ID_INTEL_82454NX 0x84cb #define PCI_VENDOR_ID_COMPUTONE 0x8e0e #define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 #define PCI_VENDOR_ID_KTI 0x8e2e #define PCI_DEVICE_ID_KTI_ET32P2 0x3000 #define PCI_VENDOR_ID_ADAPTEC 0x9004 #define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 #define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 #define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 #define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 #define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 #define PCI_DEVICE_ID_ADAPTEC_5800 0x5800 #define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 #define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 #define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 #define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 #define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 #define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 #define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 #define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 #define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 #define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 #define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 #define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 #define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 #define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 #define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 #define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 #define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 #define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 #define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 #define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78 #define PCI_VENDOR_ID_ADAPTEC2 0x9005 #define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 #define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 #define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 #define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f #define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 #define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 #define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f #define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 #define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 #define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 #define 
PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f #define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 #define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 #define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 #define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf #define PCI_VENDOR_ID_ATRONICS 0x907f #define PCI_DEVICE_ID_ATRONICS_2015 0x2015 #define PCI_VENDOR_ID_HOLTEK 0x9412 #define PCI_DEVICE_ID_HOLTEK_6565 0x6565 #define PCI_VENDOR_ID_TIGERJET 0xe159 #define PCI_DEVICE_ID_TIGERJET_300 0x0001 #define PCI_VENDOR_ID_ARK 0xedd8 #define PCI_DEVICE_ID_ARK_STING 0xa091 #define PCI_DEVICE_ID_ARK_STINGARK 0xa099 #define PCI_DEVICE_ID_ARK_2000MT 0xa0a1 #define PCI_VENDOR_ID_INTERPHASE 0x107e #define PCI_DEVICE_ID_INTERPHASE_5526 0x0004 #define PCI_DEVICE_ID_INTERPHASE_55x6 0x0005 /* * The PCI interface treats multi-function devices as independent * devices. The slot/function address of each device is encoded * in a single byte as follows: * * 7:3 = slot * 2:0 = function */ #define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) #endif /* USE_2_2_17_PCI_H */
/*
 *  Dump one line per PCI device found on the system: the pci_dev address,
 *  its bus:slot.function triple, and its class/vendor/device strings.
 *  The kernel's global device chain is located via the "pci_devices"
 *  symbol, supporting two historical layouts:
 *
 *    - pci_dev.global_list: "pci_devices" is a list_head threading all
 *      pci_dev structures together (2.4-era kernels).
 *    - pci_dev.next: "pci_devices" is the head of a NULL-terminated
 *      singly-linked list (2.2-era kernels).
 *
 *  Fails with error(FATAL) if no devices exist, or falls through to
 *  option_not_supported('p') when neither layout is recognized.
 */
static void
do_pci(void)
{
	struct list_data pcilist_data;
	int devcnt, i;
	unsigned int class;
	unsigned short device, vendor;
	unsigned char busno;
	ulong *devlist, bus, devfn, prev, next;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];

	if (!symbol_exists("pci_devices"))
		error(FATAL, "no PCI devices found on this system.\n");

	BZERO(&pcilist_data, sizeof(struct list_data));

	if (VALID_MEMBER(pci_dev_global_list)) {
		/*
		 *  list_head layout: walk from the head's first entry back
		 *  around to the head itself, with each pci_dev linked via
		 *  its embedded global_list member.
		 */
		get_symbol_data("pci_devices", sizeof(void *),
			&pcilist_data.start);
		pcilist_data.end = symbol_value("pci_devices");
		pcilist_data.list_head_offset = OFFSET(pci_dev_global_list);
		readmem(symbol_value("pci_devices") + OFFSET(list_head_prev),
			KVADDR, &prev, sizeof(void *), "list head prev",
			FAULT_ON_ERROR);
		/*
		 *  Check if this system does not have any PCI devices:
		 *  an empty list_head points to itself in both directions.
		 */
		if ((pcilist_data.start == pcilist_data.end) &&
		    (prev == pcilist_data.end))
			error(FATAL, "no PCI devices found on this system.\n");
	} else if (VALID_MEMBER(pci_dev_next)) {
		/*
		 *  Older singly-linked layout: "pci_devices" points to the
		 *  first pci_dev, chained through the "next" member.
		 */
		get_symbol_data("pci_devices", sizeof(void *),
			&pcilist_data.start);
		pcilist_data.member_offset = OFFSET(pci_dev_next);
		/*
		 *  Check if this system does not have any PCI devices.
		 */
		readmem(pcilist_data.start + pcilist_data.member_offset,
			KVADDR, &next, sizeof(void *), "pci dev next",
			FAULT_ON_ERROR);
		if (!next)
			error(FATAL, "no PCI devices found on this system.\n");
	} else
		option_not_supported('p');

	/*
	 *  Gather every pci_dev address into a local buffer; the hash
	 *  queue guards against a corrupt (looping) list.
	 */
	hq_open();
	devcnt = do_list(&pcilist_data);
	devlist = (ulong *)GETBUF(devcnt * sizeof(ulong));
	devcnt = retrieve_list(devlist, devcnt);
	hq_close();

	fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV"));

	for (i = 0; i < devcnt; i++) {
		/*
		 * Get the pci bus number
		 */
		readmem(devlist[i] + OFFSET(pci_dev_bus), KVADDR, &bus,
			sizeof(void *), "pci bus", FAULT_ON_ERROR);
		readmem(bus + OFFSET(pci_bus_number), KVADDR, &busno,
			sizeof(char), "pci bus number", FAULT_ON_ERROR);
		/*
		 *  NOTE(review): devfn is read as sizeof(ulong) although the
		 *  kernel field is typically an unsigned int -- on 64-bit
		 *  targets this pulls in adjacent bytes; PCI_SLOT/PCI_FUNC
		 *  only look at the low byte, so the output is unaffected,
		 *  but confirm against the target's struct pci_dev layout.
		 */
		readmem(devlist[i] + OFFSET(pci_dev_devfn), KVADDR, &devfn,
			sizeof(ulong), "pci devfn", FAULT_ON_ERROR);
		fprintf(fp, "%lx %02x:%02lx.%lx ", devlist[i], busno, PCI_SLOT(devfn), PCI_FUNC(devfn));
		/*
		 * Now read in the class, device, and vendor.
		 */
		readmem(devlist[i] + OFFSET(pci_dev_class), KVADDR, &class,
			sizeof(int), "pci class", FAULT_ON_ERROR);
		readmem(devlist[i] + OFFSET(pci_dev_device), KVADDR, &device,
			sizeof(short), "pci device", FAULT_ON_ERROR);
		readmem(devlist[i] + OFFSET(pci_dev_vendor),KVADDR, &vendor,
			sizeof(short), "pci vendor", FAULT_ON_ERROR);
		fprintf(fp, "%s: %s %s", pci_strclass(class, buf1), pci_strvendor(vendor, buf2), pci_strdev(vendor, device, buf3));
		fprintf(fp, "\n");
	}

	FREEBUF(devlist);
}
/* * Taken from drivers/pci/oldproc.c, kernel ver 2.2.17 */ struct pci_dev_info { unsigned short vendor; /* vendor id */ unsigned short device; /* device id */ const char *name; /* device name */ }; #define DEVICE(vid,did,name) \ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name)} /* * Sorted in ascending order by vendor and device. * Use binary search for lookup. If you add a device make sure * it is sequential by both vendor and device id. */ struct pci_dev_info dev_info[] = { DEVICE( COMPAQ, COMPAQ_1280, "QVision 1280/p"), DEVICE( COMPAQ, COMPAQ_6010, "Hot Plug PCI Bridge"), DEVICE( COMPAQ, COMPAQ_SMART2P, "Smart-2/P RAID Controller"), DEVICE( COMPAQ, COMPAQ_NETEL100,"Netelligent 10/100"), DEVICE( COMPAQ, COMPAQ_NETEL10, "Netelligent 10"), DEVICE( COMPAQ, COMPAQ_NETFLEX3I,"NetFlex 3"), DEVICE( COMPAQ, COMPAQ_NETEL100D,"Netelligent 10/100 Dual"), DEVICE( COMPAQ, COMPAQ_NETEL100PI,"Netelligent 10/100 ProLiant"), DEVICE( COMPAQ, COMPAQ_NETEL100I,"Netelligent 10/100 Integrated"), DEVICE( COMPAQ, COMPAQ_THUNDER, "ThunderLAN"), DEVICE( COMPAQ, COMPAQ_NETFLEX3B,"NetFlex 3 BNC"), DEVICE( NCR, NCR_53C810, "53c810"), DEVICE( NCR, NCR_53C820, "53c820"), DEVICE( NCR, NCR_53C825, "53c825"), DEVICE( NCR, NCR_53C815, "53c815"), DEVICE( NCR, NCR_53C860, "53c860"), DEVICE( NCR, NCR_53C896, "53c896"), DEVICE( NCR, NCR_53C895, "53c895"), DEVICE( NCR, NCR_53C885, "53c885"), DEVICE( NCR, NCR_53C875, "53c875"), DEVICE( NCR, NCR_53C875J, "53c875J"), DEVICE( ATI, ATI_68800, "68800AX"), DEVICE( ATI, ATI_215CT222,
"215CT222"), DEVICE( ATI, ATI_210888CX, "210888CX"), DEVICE( ATI, ATI_215GB, "Mach64 GB"), DEVICE( ATI, ATI_215GD, "Mach64 GD (Rage Pro)"), DEVICE( ATI, ATI_215GI, "Mach64 GI (Rage Pro)"), DEVICE( ATI, ATI_215GP, "Mach64 GP (Rage Pro)"), DEVICE( ATI, ATI_215GQ, "Mach64 GQ (Rage Pro)"), DEVICE( ATI, ATI_215GT, "Mach64 GT (Rage II)"), DEVICE( ATI, ATI_215GTB, "Mach64 GT (Rage II)"), DEVICE( ATI, ATI_210888GX, "210888GX"), DEVICE( ATI, ATI_215LG, "Mach64 LG (Rage Pro)"), DEVICE( ATI, ATI_264LT, "Mach64 LT"), DEVICE( ATI, ATI_264VT, "Mach64 VT"), DEVICE( VLSI, VLSI_82C592, "82C592-FC1"), DEVICE( VLSI, VLSI_82C593, "82C593-FC1"), DEVICE( VLSI, VLSI_82C594, "82C594-AFC2"), DEVICE( VLSI, VLSI_82C597, "82C597-AFC2"), DEVICE( VLSI, VLSI_82C541, "82C541 Lynx"), DEVICE( VLSI, VLSI_82C543, "82C543 Lynx ISA"), DEVICE( VLSI, VLSI_82C532, "82C532"), DEVICE( VLSI, VLSI_82C534, "82C534"), DEVICE( VLSI, VLSI_82C535, "82C535"), DEVICE( VLSI, VLSI_82C147, "82C147"), DEVICE( VLSI, VLSI_VAS96011, "VAS96011 (Golden Gate II)"), DEVICE( ADL, ADL_2301, "2301"), DEVICE( NS, NS_87415, "87415"), DEVICE( NS, NS_87410, "87410"), DEVICE( TSENG, TSENG_W32P_2, "ET4000W32P"), DEVICE( TSENG, TSENG_W32P_b, "ET4000W32P rev B"), DEVICE( TSENG, TSENG_W32P_c, "ET4000W32P rev C"), DEVICE( TSENG, TSENG_W32P_d, "ET4000W32P rev D"), DEVICE( TSENG, TSENG_ET6000, "ET6000"), DEVICE( WEITEK, WEITEK_P9000, "P9000"), DEVICE( WEITEK, WEITEK_P9100, "P9100"), DEVICE( DEC, DEC_BRD, "DC21050"), DEVICE( DEC, DEC_TULIP, "DC21040"), DEVICE( DEC, DEC_TGA, "TGA"), DEVICE( DEC, DEC_TULIP_FAST, "DC21140"), DEVICE( DEC, DEC_TGA2, "TGA2"), DEVICE( DEC, DEC_FDDI, "DEFPA"), DEVICE( DEC, DEC_TULIP_PLUS, "DC21041"), DEVICE( DEC, DEC_21142, "DC21142"), DEVICE( DEC, DEC_21052, "DC21052"), DEVICE( DEC, DEC_21150, "DC21150"), DEVICE( DEC, DEC_21152, "DC21152"), DEVICE( DEC, DEC_21153, "DC21153"), DEVICE( DEC, DEC_21154, "DC21154"), DEVICE( DEC, DEC_21285, "DC21285 Footbridge"), DEVICE( DEC, DEC_21554, "DC21554 DrawBridge"), DEVICE( 
CIRRUS, CIRRUS_7548, "GD 7548"), DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"), DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"), DEVICE( CIRRUS, CIRRUS_5434_8, "GD 5434"), DEVICE( CIRRUS, CIRRUS_5436, "GD 5436"), DEVICE( CIRRUS, CIRRUS_5446, "GD 5446"), DEVICE( CIRRUS, CIRRUS_5480, "GD 5480"), DEVICE( CIRRUS, CIRRUS_5464, "GD 5464"), DEVICE( CIRRUS, CIRRUS_5465, "GD 5465"), DEVICE( CIRRUS, CIRRUS_6729, "CL 6729"), DEVICE( CIRRUS, CIRRUS_6832, "PD 6832"), DEVICE( CIRRUS, CIRRUS_7542, "CL 7542"), DEVICE( CIRRUS, CIRRUS_7543, "CL 7543"), DEVICE( CIRRUS, CIRRUS_7541, "CL 7541"), DEVICE( IBM, IBM_FIRE_CORAL, "Fire Coral"), DEVICE( IBM, IBM_TR, "Token Ring"), DEVICE( IBM, IBM_82G2675, "82G2675"), DEVICE( IBM, IBM_MCA, "MicroChannel"), DEVICE( IBM, IBM_82351, "82351"), DEVICE( IBM, IBM_PYTHON, "Python"), DEVICE( IBM, IBM_SERVERAID, "ServeRAID"), DEVICE( IBM, IBM_TR_WAKE, "Wake On LAN Token Ring"), DEVICE( IBM, IBM_MPIC, "MPIC-2 Interrupt Controller"), DEVICE( IBM, IBM_3780IDSP, "MWave DSP"), DEVICE( IBM, IBM_MPIC_2, "MPIC-2 ASIC Interrupt Controller"), DEVICE( WD, WD_7197, "WD 7197"), DEVICE( AMD, AMD_LANCE, "79C970"), DEVICE( AMD, AMD_SCSI, "53C974"), DEVICE( TRIDENT, TRIDENT_9397, "Cyber9397"), DEVICE( TRIDENT, TRIDENT_9420, "TG 9420"), DEVICE( TRIDENT, TRIDENT_9440, "TG 9440"), DEVICE( TRIDENT, TRIDENT_9660, "TG 9660 / Cyber9385"), DEVICE( TRIDENT, TRIDENT_9750, "Image 975"), DEVICE( AI, AI_M1435, "M1435"), DEVICE( MATROX, MATROX_MGA_2, "Atlas PX2085"), DEVICE( MATROX, MATROX_MIL, "Millennium"), DEVICE( MATROX, MATROX_MYS, "Mystique"), DEVICE( MATROX, MATROX_MIL_2, "Millennium II"), DEVICE( MATROX, MATROX_MIL_2_AGP,"Millennium II AGP"), DEVICE( MATROX, MATROX_G200_PCI,"Matrox G200 PCI"), DEVICE( MATROX, MATROX_G200_AGP,"Matrox G200 AGP"), DEVICE( MATROX, MATROX_MGA_IMP, "MGA Impression"), DEVICE( MATROX, MATROX_G100_MM, "Matrox G100 multi monitor"), DEVICE( MATROX, MATROX_G100_AGP,"Matrox G100 AGP"), DEVICE( CT, CT_65545, "65545"), DEVICE( CT, CT_65548, "65548"), DEVICE( CT, 
CT_65550, "65550"), DEVICE( CT, CT_65554, "65554"), DEVICE( CT, CT_65555, "65555"), DEVICE( MIRO, MIRO_36050, "ZR36050"), DEVICE( NEC, NEC_PCX2, "PowerVR PCX2"), DEVICE( FD, FD_36C70, "TMC-18C30"), DEVICE( SI, SI_5591_AGP, "5591/5592 AGP"), DEVICE( SI, SI_6202, "6202"), DEVICE( SI, SI_503, "85C503"), DEVICE( SI, SI_ACPI, "ACPI"), DEVICE( SI, SI_5597_VGA, "5597/5598 VGA"), DEVICE( SI, SI_6205, "6205"), DEVICE( SI, SI_501, "85C501"), DEVICE( SI, SI_496, "85C496"), DEVICE( SI, SI_601, "85C601"), DEVICE( SI, SI_5107, "5107"), DEVICE( SI, SI_5511, "85C5511"), DEVICE( SI, SI_5513, "85C5513"), DEVICE( SI, SI_5571, "5571"), DEVICE( SI, SI_5591, "5591/5592 Host"), DEVICE( SI, SI_5597, "5597/5598 Host"), DEVICE( SI, SI_7001, "7001 USB"), DEVICE( HP, HP_J2585A, "J2585A"), DEVICE( HP, HP_J2585B, "J2585B (Lassen)"), DEVICE( PCTECH, PCTECH_RZ1000, "RZ1000 (buggy)"), DEVICE( PCTECH, PCTECH_RZ1001, "RZ1001 (buggy?)"), DEVICE( PCTECH, PCTECH_SAMURAI_0,"Samurai 0"), DEVICE( PCTECH, PCTECH_SAMURAI_1,"Samurai 1"), DEVICE( PCTECH, PCTECH_SAMURAI_IDE,"Samurai IDE"), DEVICE( DPT, DPT, "SmartCache/Raid"), DEVICE( OPTI, OPTI_92C178, "92C178"), DEVICE( OPTI, OPTI_82C557, "82C557 Viper-M"), DEVICE( OPTI, OPTI_82C558, "82C558 Viper-M ISA+IDE"), DEVICE( OPTI, OPTI_82C621, "82C621"), DEVICE( OPTI, OPTI_82C700, "82C700"), DEVICE( OPTI, OPTI_82C701, "82C701 FireStar Plus"), DEVICE( OPTI, OPTI_82C814, "82C814 Firebridge 1"), DEVICE( OPTI, OPTI_82C822, "82C822"), DEVICE( OPTI, OPTI_82C825, "82C825 Firebridge 2"), DEVICE( SGS, SGS_2000, "STG 2000X"), DEVICE( SGS, SGS_1764, "STG 1764X"), DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER_NC, "MultiMaster NC"), DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER, "MultiMaster"), DEVICE( BUSLOGIC, BUSLOGIC_FLASHPOINT, "FlashPoint"), DEVICE( TI, TI_TVP4010, "TVP4010 Permedia"), DEVICE( TI, TI_TVP4020, "TVP4020 Permedia 2"), DEVICE( TI, TI_PCI1130, "PCI1130"), DEVICE( TI, TI_PCI1131, "PCI1131"), DEVICE( TI, TI_PCI1250, "PCI1250"), DEVICE( OAK, OAK_OTI107, "OTI107"), DEVICE( 
WINBOND2, WINBOND2_89C940,"NE2000-PCI"), DEVICE( MOTOROLA, MOTOROLA_MPC105,"MPC105 Eagle"), DEVICE( MOTOROLA, MOTOROLA_MPC106,"MPC106 Grackle"), DEVICE( MOTOROLA, MOTOROLA_RAVEN, "Raven"), DEVICE( MOTOROLA, MOTOROLA_FALCON,"Falcon"), DEVICE( MOTOROLA, MOTOROLA_CPX8216,"CPX8216"), DEVICE( PROMISE, PROMISE_20246, "IDE UltraDMA/33"), DEVICE( PROMISE, PROMISE_5300, "DC5030"), DEVICE( N9, N9_I128, "Imagine 128"), DEVICE( N9, N9_I128_2, "Imagine 128v2"), DEVICE( N9, N9_I128_T2R, "Revolution 3D"), DEVICE( UMC, UMC_UM8673F, "UM8673F"), DEVICE( UMC, UMC_UM8891A, "UM8891A"), DEVICE( UMC, UMC_UM8886BF, "UM8886BF"), DEVICE( UMC, UMC_UM8886A, "UM8886A"), DEVICE( UMC, UMC_UM8881F, "UM8881F"), DEVICE( UMC, UMC_UM8886F, "UM8886F"), DEVICE( UMC, UMC_UM9017F, "UM9017F"), DEVICE( UMC, UMC_UM8886N, "UM8886N"), DEVICE( UMC, UMC_UM8891N, "UM8891N"), DEVICE( X, X_AGX016, "ITT AGX016"), DEVICE( PICOP, PICOP_PT86C52X, "PT86C52x Vesuvius"), DEVICE( PICOP, PICOP_PT80C524, "PT80C524 Nile"), DEVICE( MYLEX, MYLEX_DAC960_P, "DAC960 P Series"), DEVICE( MYLEX, MYLEX_DAC960_PD,"DAC960 PD Series"), DEVICE( MYLEX, MYLEX_DAC960_PG,"DAC960 PG Series"), DEVICE( MYLEX, MYLEX_DAC960_LP,"DAC960 LP Series"), DEVICE( MYLEX, MYLEX_DAC960_BA,"DAC960 BA Series"), DEVICE( APPLE, APPLE_BANDIT, "Bandit"), DEVICE( APPLE, APPLE_GC, "Grand Central"), DEVICE( APPLE, APPLE_HYDRA, "Hydra"), DEVICE( NEXGEN, NEXGEN_82C501, "82C501"), DEVICE( QLOGIC, QLOGIC_ISP1020, "ISP1020"), DEVICE( QLOGIC, QLOGIC_ISP1022, "ISP1022"), DEVICE( CYRIX, CYRIX_5510, "5510"), DEVICE( CYRIX, CYRIX_PCI_MASTER,"PCI Master"), DEVICE( CYRIX, CYRIX_5520, "5520"), DEVICE( CYRIX, CYRIX_5530_LEGACY,"5530 Kahlua Legacy"), DEVICE( CYRIX, CYRIX_5530_SMI, "5530 Kahlua SMI"), DEVICE( CYRIX, CYRIX_5530_IDE, "5530 Kahlua IDE"), DEVICE( CYRIX, CYRIX_5530_AUDIO,"5530 Kahlua Audio"), DEVICE( CYRIX, CYRIX_5530_VIDEO,"5530 Kahlua Video"), DEVICE( LEADTEK, LEADTEK_805, "S3 805"), DEVICE( CONTAQ, CONTAQ_82C599, "82C599"), DEVICE( CONTAQ, CONTAQ_82C693, "82C693"), 
DEVICE( OLICOM, OLICOM_OC3136, "OC-3136/3137"), DEVICE( OLICOM, OLICOM_OC2315, "OC-2315"), DEVICE( OLICOM, OLICOM_OC2325, "OC-2325"), DEVICE( OLICOM, OLICOM_OC2183, "OC-2183/2185"), DEVICE( OLICOM, OLICOM_OC2326, "OC-2326"), DEVICE( OLICOM, OLICOM_OC6151, "OC-6151/6152"), DEVICE( SUN, SUN_EBUS, "PCI-EBus Bridge"), DEVICE( SUN, SUN_HAPPYMEAL, "Happy Meal Ethernet"), DEVICE( SUN, SUN_SIMBA, "Advanced PCI Bridge"), DEVICE( SUN, SUN_PBM, "PCI Bus Module"), DEVICE( SUN, SUN_SABRE, "Ultra IIi PCI"), DEVICE( CMD, CMD_640, "640 (buggy)"), DEVICE( CMD, CMD_643, "643"), DEVICE( CMD, CMD_646, "646"), DEVICE( CMD, CMD_670, "670"), DEVICE( VISION, VISION_QD8500, "QD-8500"), DEVICE( VISION, VISION_QD8580, "QD-8580"), DEVICE( BROOKTREE, BROOKTREE_848, "Bt848"), DEVICE( BROOKTREE, BROOKTREE_849A, "Bt849"), DEVICE( BROOKTREE, BROOKTREE_878_1,"Bt878 2nd Contr. (?)"), DEVICE( BROOKTREE, BROOKTREE_878, "Bt878"), DEVICE( BROOKTREE, BROOKTREE_8474, "Bt8474"), DEVICE( SIERRA, SIERRA_STB, "STB Horizon 64"), DEVICE( ACC, ACC_2056, "2056"), DEVICE( WINBOND, WINBOND_83769, "W83769F"), DEVICE( WINBOND, WINBOND_82C105, "SL82C105"), DEVICE( WINBOND, WINBOND_83C553, "W83C553"), DEVICE( DATABOOK, DATABOOK_87144, "DB87144"), DEVICE( PLX, PLX_9050, "PCI9050 I2O"), DEVICE( PLX, PLX_9080, "PCI9080 I2O"), DEVICE( MADGE, MADGE_MK2, "Smart 16/4 BM Mk2 Ringnode"), DEVICE( MADGE, MADGE_C155S, "Collage 155 Server"), DEVICE( 3COM, 3COM_3C339, "3C339 TokenRing"), DEVICE( 3COM, 3COM_3C590, "3C590 10bT"), DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"), DEVICE( 3COM, 3COM_3C595T4, "3C595 100bT4"), DEVICE( 3COM, 3COM_3C595MII, "3C595 100b-MII"), DEVICE( 3COM, 3COM_3C900TPO, "3C900 10bTPO"), DEVICE( 3COM, 3COM_3C900COMBO,"3C900 10b Combo"), DEVICE( 3COM, 3COM_3C905TX, "3C905 100bTX"), DEVICE( 3COM, 3COM_3C905T4, "3C905 100bT4"), DEVICE( 3COM, 3COM_3C905B_TX, "3C905B 100bTX"), DEVICE( SMC, SMC_EPIC100, "9432 TX"), DEVICE( AL, AL_M1445, "M1445"), DEVICE( AL, AL_M1449, "M1449"), DEVICE( AL, AL_M1451, "M1451"), DEVICE( 
AL, AL_M1461, "M1461"), DEVICE( AL, AL_M1489, "M1489"), DEVICE( AL, AL_M1511, "M1511"), DEVICE( AL, AL_M1513, "M1513"), DEVICE( AL, AL_M1521, "M1521"), DEVICE( AL, AL_M1523, "M1523"), DEVICE( AL, AL_M1531, "M1531 Aladdin IV"), DEVICE( AL, AL_M1533, "M1533 Aladdin IV"), DEVICE( AL, AL_M3307, "M3307 MPEG-1 decoder"), DEVICE( AL, AL_M4803, "M4803"), DEVICE( AL, AL_M5219, "M5219"), DEVICE( AL, AL_M5229, "M5229 TXpro"), DEVICE( AL, AL_M5237, "M5237 USB"), DEVICE( SURECOM, SURECOM_NE34, "NE-34PCI LAN"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2070, "Magicgraph NM2070"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128V, "MagicGraph 128V"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZV, "MagicGraph 128ZV"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2160, "MagicGraph NM2160"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZVPLUS, "MagicGraph 128ZV+"), DEVICE( ASP, ASP_ABP940, "ABP940"), DEVICE( ASP, ASP_ABP940U, "ABP940U"), DEVICE( ASP, ASP_ABP940UW, "ABP940UW"), DEVICE( MACRONIX, MACRONIX_MX98713,"MX98713"), DEVICE( MACRONIX, MACRONIX_MX987x5,"MX98715 / MX98725"), DEVICE( CERN, CERN_SPSB_PMC, "STAR/RD24 SCI-PCI (PMC)"), DEVICE( CERN, CERN_SPSB_PCI, "STAR/RD24 SCI-PCI (PMC)"), DEVICE( CERN, CERN_HIPPI_DST, "HIPPI destination"), DEVICE( CERN, CERN_HIPPI_SRC, "HIPPI source"), DEVICE( IMS, IMS_8849, "8849"), DEVICE( TEKRAM2, TEKRAM2_690c, "DC690c"), DEVICE( TUNDRA, TUNDRA_CA91C042,"CA91C042 Universe"), DEVICE( AMCC, AMCC_MYRINET, "Myrinet PCI (M2-PCI-32)"), DEVICE( AMCC, AMCC_PARASTATION,"ParaStation Interface"), DEVICE( AMCC, AMCC_S5933, "S5933 PCI44"), DEVICE( AMCC, AMCC_S5933_HEPC3,"S5933 Traquair HEPC3"), DEVICE( INTERG, INTERG_1680, "IGA-1680"), DEVICE( INTERG, INTERG_1682, "IGA-1682"), DEVICE( REALTEK, REALTEK_8029, "8029"), DEVICE( REALTEK, REALTEK_8129, "8129"), DEVICE( REALTEK, REALTEK_8139, "8139"), DEVICE( TRUEVISION, TRUEVISION_T1000,"TARGA 1000"), DEVICE( INIT, INIT_320P, "320 P"), DEVICE( INIT, INIT_360P, "360 P"), DEVICE( TTI, TTI_HPT343, "HPT343"), DEVICE( VIA, VIA_82C505, 
"VT 82C505"), DEVICE( VIA, VIA_82C561, "VT 82C561"), DEVICE( VIA, VIA_82C586_1, "VT 82C586 Apollo IDE"), DEVICE( VIA, VIA_82C576, "VT 82C576 3V"), DEVICE( VIA, VIA_82C585, "VT 82C585 Apollo VP1/VPX"), DEVICE( VIA, VIA_82C586_0, "VT 82C586 Apollo ISA"), DEVICE( VIA, VIA_82C595, "VT 82C595 Apollo VP2"), DEVICE( VIA, VIA_82C596_0, "VT 82C596 Apollo Pro"), DEVICE( VIA, VIA_82C597_0, "VT 82C597 Apollo VP3"), DEVICE( VIA, VIA_82C598_0, "VT 82C598 Apollo MVP3"), DEVICE( VIA, VIA_82C926, "VT 82C926 Amazon"), DEVICE( VIA, VIA_82C416, "VT 82C416MV"), DEVICE( VIA, VIA_82C595_97, "VT 82C595 Apollo VP2/97"), DEVICE( VIA, VIA_82C586_2, "VT 82C586 Apollo USB"), DEVICE( VIA, VIA_82C586_3, "VT 82C586B Apollo ACPI"), DEVICE( VIA, VIA_86C100A, "VT 86C100A"), DEVICE( VIA, VIA_82C597_1, "VT 82C597 Apollo VP3 AGP"), DEVICE( VIA, VIA_82C598_1, "VT 82C598 Apollo MVP3 AGP"), DEVICE( SMC2, SMC2_1211TX, "1211 TX"), DEVICE( VORTEX, VORTEX_GDT60x0, "GDT 60x0"), DEVICE( VORTEX, VORTEX_GDT6000B,"GDT 6000b"), DEVICE( VORTEX, VORTEX_GDT6x10, "GDT 6110/6510"), DEVICE( VORTEX, VORTEX_GDT6x20, "GDT 6120/6520"), DEVICE( VORTEX, VORTEX_GDT6530, "GDT 6530"), DEVICE( VORTEX, VORTEX_GDT6550, "GDT 6550"), DEVICE( VORTEX, VORTEX_GDT6x17, "GDT 6117/6517"), DEVICE( VORTEX, VORTEX_GDT6x27, "GDT 6127/6527"), DEVICE( VORTEX, VORTEX_GDT6537, "GDT 6537"), DEVICE( VORTEX, VORTEX_GDT6557, "GDT 6557"), DEVICE( VORTEX, VORTEX_GDT6x15, "GDT 6115/6515"), DEVICE( VORTEX, VORTEX_GDT6x25, "GDT 6125/6525"), DEVICE( VORTEX, VORTEX_GDT6535, "GDT 6535"), DEVICE( VORTEX, VORTEX_GDT6555, "GDT 6555"), DEVICE( VORTEX, VORTEX_GDT6x17RP,"GDT 6117RP/6517RP"), DEVICE( VORTEX, VORTEX_GDT6x27RP,"GDT 6127RP/6527RP"), DEVICE( VORTEX, VORTEX_GDT6537RP,"GDT 6537RP"), DEVICE( VORTEX, VORTEX_GDT6557RP,"GDT 6557RP"), DEVICE( VORTEX, VORTEX_GDT6x11RP,"GDT 6111RP/6511RP"), DEVICE( VORTEX, VORTEX_GDT6x21RP,"GDT 6121RP/6521RP"), DEVICE( VORTEX, VORTEX_GDT6x17RP1,"GDT 6117RP1/6517RP1"), DEVICE( VORTEX, VORTEX_GDT6x27RP1,"GDT 6127RP1/6527RP1"), 
DEVICE( VORTEX, VORTEX_GDT6537RP1,"GDT 6537RP1"), DEVICE( VORTEX, VORTEX_GDT6557RP1,"GDT 6557RP1"), DEVICE( VORTEX, VORTEX_GDT6x11RP1,"GDT 6111RP1/6511RP1"), DEVICE( VORTEX, VORTEX_GDT6x21RP1,"GDT 6121RP1/6521RP1"), DEVICE( VORTEX, VORTEX_GDT6x17RP2,"GDT 6117RP2/6517RP2"), DEVICE( VORTEX, VORTEX_GDT6x27RP2,"GDT 6127RP2/6527RP2"), DEVICE( VORTEX, VORTEX_GDT6537RP2,"GDT 6537RP2"), DEVICE( VORTEX, VORTEX_GDT6557RP2,"GDT 6557RP2"), DEVICE( VORTEX, VORTEX_GDT6x11RP2,"GDT 6111RP2/6511RP2"), DEVICE( VORTEX, VORTEX_GDT6x21RP2,"GDT 6121RP2/6521RP2"), DEVICE( EF, EF_ATM_FPGA, "155P-MF1 (FPGA)"), DEVICE( EF, EF_ATM_ASIC, "155P-MF1 (ASIC)"), DEVICE( FORE, FORE_PCA200PC, "PCA-200PC"), DEVICE( FORE, FORE_PCA200E, "PCA-200E"), DEVICE( IMAGINGTECH, IMAGINGTECH_ICPCI, "MVC IC-PCI"), DEVICE( PHILIPS, PHILIPS_SAA7145,"SAA7145"), DEVICE( PHILIPS, PHILIPS_SAA7146,"SAA7146"), DEVICE( CYCLONE, CYCLONE_SDK, "SDK"), DEVICE( ALLIANCE, ALLIANCE_PROMOTIO, "Promotion-6410"), DEVICE( ALLIANCE, ALLIANCE_PROVIDEO, "Provideo"), DEVICE( ALLIANCE, ALLIANCE_AT24, "AT24"), DEVICE( ALLIANCE, ALLIANCE_AT3D, "AT3D"), DEVICE( SYSKONNECT, SYSKONNECT_FP, "SK-FDDI-PCI"), DEVICE( SYSKONNECT, SYSKONNECT_TR, "SK-TR-PCI"), DEVICE( SYSKONNECT, SYSKONNECT_GE, "SK-98xx"), DEVICE( VMIC, VMIC_VME, "VMIVME-7587"), DEVICE( DIGI, DIGI_EPC, "AccelPort EPC"), DEVICE( DIGI, DIGI_RIGHTSWITCH, "RightSwitch SE-6"), DEVICE( DIGI, DIGI_XEM, "AccelPort Xem"), DEVICE( DIGI, DIGI_XR, "AccelPort Xr"), DEVICE( DIGI, DIGI_CX, "AccelPort C/X"), DEVICE( DIGI, DIGI_XRJ, "AccelPort Xr/J"), DEVICE( DIGI, DIGI_EPCJ, "AccelPort EPC/J"), DEVICE( DIGI, DIGI_XR_920, "AccelPort Xr 920"), DEVICE( MUTECH, MUTECH_MV1000, "MV-1000"), DEVICE( RENDITION, RENDITION_VERITE,"Verite 1000"), DEVICE( RENDITION, RENDITION_VERITE2100,"Verite 2100"), DEVICE( SERVERWORKS, SERVERWORKS_HE, "CNB20HE PCI Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_LE, "CNB30LE PCI Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_CMIC_HE, "CMIC-HE PCI Bridge"), DEVICE( SERVERWORKS, 
SERVERWORKS_CIOB30, "CIOB30 I/O Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_CSB5, "CSB5 PCI Bridge"), DEVICE( TOSHIBA, TOSHIBA_601, "Laptop"), DEVICE( TOSHIBA, TOSHIBA_TOPIC95,"ToPIC95"), DEVICE( TOSHIBA, TOSHIBA_TOPIC97,"ToPIC97"), DEVICE( RICOH, RICOH_RL5C466, "RL5C466"), DEVICE( ARTOP, ARTOP_ATP8400, "ATP8400"), DEVICE( ARTOP, ARTOP_ATP850UF, "ATP850UF"), DEVICE( ZEITNET, ZEITNET_1221, "1221"), DEVICE( ZEITNET, ZEITNET_1225, "1225"), DEVICE( OMEGA, OMEGA_82C092G, "82C092G"), DEVICE( LITEON, LITEON_LNE100TX,"LNE100TX"), DEVICE( NP, NP_PCI_FDDI, "NP-PCI"), DEVICE( ATT, ATT_L56XMF, "L56xMF"), DEVICE( ATT, ATT_L56DVP, "L56DV+P"), DEVICE( SPECIALIX, SPECIALIX_IO8, "IO8+/PCI"), DEVICE( SPECIALIX, SPECIALIX_XIO, "XIO/SIO host"), DEVICE( SPECIALIX, SPECIALIX_RIO, "RIO host"), DEVICE( AURAVISION, AURAVISION_VXP524,"VXP524"), DEVICE( IKON, IKON_10115, "10115 Greensheet"), DEVICE( IKON, IKON_10117, "10117 Greensheet"), DEVICE( ZORAN, ZORAN_36057, "ZR36057"), DEVICE( ZORAN, ZORAN_36120, "ZR36120"), DEVICE( KINETIC, KINETIC_2915, "2915 CAMAC"), DEVICE( COMPEX, COMPEX_ENET100VG4, "Readylink ENET100-VG4"), DEVICE( COMPEX, COMPEX_RL2000, "ReadyLink 2000"), DEVICE( RP, RP32INTF, "RocketPort 32 Intf"), DEVICE( RP, RP8INTF, "RocketPort 8 Intf"), DEVICE( RP, RP16INTF, "RocketPort 16 Intf"), DEVICE( RP, RP4QUAD, "Rocketport 4 Quad"), DEVICE( RP, RP8OCTA, "RocketPort 8 Oct"), DEVICE( RP, RP8J, "RocketPort 8 J"), DEVICE( RP, RPP4, "RocketPort Plus 4 Quad"), DEVICE( RP, RPP8, "RocketPort Plus 8 Oct"), DEVICE( RP, RP8M, "RocketModem 8 J"), DEVICE( CYCLADES, CYCLOM_Y_Lo, "Cyclom-Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Y_Hi, "Cyclom-Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_4Y_Lo, "Cyclom-4Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_4Y_Hi, "Cyclom-4Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_8Y_Lo, "Cyclom-8Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_8Y_Hi, "Cyclom-8Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Z_Lo, "Cyclades-Z below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Z_Hi, "Cyclades-Z 
above 1Mbyte"), DEVICE( CYCLADES, PC300_RX_2, "PC300/RSV or /X21 (2 ports)"), DEVICE( CYCLADES, PC300_RX_1, "PC300/RSV or /X21 (1 port)"), DEVICE( CYCLADES, PC300_TE_2, "PC300/TE (2 ports)"), DEVICE( CYCLADES, PC300_TE_1, "PC300/TE (1 port)"), DEVICE( ESSENTIAL, ESSENTIAL_ROADRUNNER,"Roadrunner serial HIPPI"), DEVICE( O2, O2_6832, "6832"), DEVICE( 3DFX, 3DFX_VOODOO, "Voodoo"), DEVICE( 3DFX, 3DFX_VOODOO2, "Voodoo2"), DEVICE( 3DFX, 3DFX_BANSHEE, "Banshee"), DEVICE( SIGMADES, SIGMADES_6425, "REALmagic64/GX"), DEVICE( AVM, AVM_A1, "A1 (Fritz)"), DEVICE( STALLION, STALLION_ECHPCI832,"EasyConnection 8/32"), DEVICE( STALLION, STALLION_ECHPCI864,"EasyConnection 8/64"), DEVICE( STALLION, STALLION_EIOPCI,"EasyIO"), DEVICE( OPTIBASE, OPTIBASE_FORGE, "MPEG Forge"), DEVICE( OPTIBASE, OPTIBASE_FUSION,"MPEG Fusion"), DEVICE( OPTIBASE, OPTIBASE_VPLEX, "VideoPlex"), DEVICE( OPTIBASE, OPTIBASE_VPLEXCC,"VideoPlex CC"), DEVICE( OPTIBASE, OPTIBASE_VQUEST,"VideoQuest"), DEVICE( SATSAGEM, SATSAGEM_PCR2101,"PCR2101 DVB receiver"), DEVICE( SATSAGEM, SATSAGEM_TELSATTURBO,"Telsat Turbo DVB"), DEVICE( HUGHES, HUGHES_DIRECPC, "DirecPC"), DEVICE( ENSONIQ, ENSONIQ_ES1371, "ES1371"), DEVICE( ENSONIQ, ENSONIQ_AUDIOPCI,"AudioPCI"), DEVICE( ALTEON, ALTEON_ACENIC, "AceNIC"), DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"), DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"), DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"), DEVICE( MOTOROLA_OOPS, MOTOROLA_FALCON,"Falcon"), DEVICE( TIMEDIA, TIMEDIA_4008A, "Noname 4008A"), DEVICE( SYMPHONY, SYMPHONY_101, "82C101"), DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"), DEVICE( 3DLABS, 3DLABS_300SX, "GLINT 300SX"), DEVICE( 3DLABS, 3DLABS_500TX, "GLINT 500TX"), DEVICE( 3DLABS, 3DLABS_DELTA, "GLINT Delta"), DEVICE( 3DLABS, 3DLABS_PERMEDIA,"PERMEDIA"), DEVICE( 3DLABS, 3DLABS_MX, "GLINT MX"), DEVICE( AVANCE, AVANCE_ALG2064, "ALG2064i"), DEVICE( AVANCE, AVANCE_2302, "ALG-2302"), DEVICE( NETVIN, NETVIN_NV5000SC,"NV5000"), DEVICE( S3, S3_PLATO_PXS, "PLATO/PX (system)"), 
DEVICE( S3, S3_ViRGE, "ViRGE"), DEVICE( S3, S3_TRIO, "Trio32/Trio64"), DEVICE( S3, S3_AURORA64VP, "Aurora64V+"), DEVICE( S3, S3_TRIO64UVP, "Trio64UV+"), DEVICE( S3, S3_ViRGE_VX, "ViRGE/VX"), DEVICE( S3, S3_868, "Vision 868"), DEVICE( S3, S3_928, "Vision 928-P"), DEVICE( S3, S3_864_1, "Vision 864-P"), DEVICE( S3, S3_864_2, "Vision 864-P"), DEVICE( S3, S3_964_1, "Vision 964-P"), DEVICE( S3, S3_964_2, "Vision 964-P"), DEVICE( S3, S3_968, "Vision 968"), DEVICE( S3, S3_TRIO64V2, "Trio64V2/DX or /GX"), DEVICE( S3, S3_PLATO_PXG, "PLATO/PX (graphics)"), DEVICE( S3, S3_ViRGE_DXGX, "ViRGE/DX or /GX"), DEVICE( S3, S3_ViRGE_GX2, "ViRGE/GX2"), DEVICE( S3, S3_ViRGE_MX, "ViRGE/MX"), DEVICE( S3, S3_ViRGE_MXP, "ViRGE/MX+"), DEVICE( S3, S3_ViRGE_MXPMV, "ViRGE/MX+MV"), DEVICE( S3, S3_SONICVIBES, "SonicVibes"), DEVICE( DCI, DCI_PCCOM4, "PC COM PCI Bus 4 port serial Adapter"), DEVICE( GENROCO, GENROCO_HFP832, "TURBOstor HFP832"), DEVICE( INTEL, INTEL_82375, "82375EB"), DEVICE( INTEL, INTEL_82424, "82424ZX Saturn"), DEVICE( INTEL, INTEL_82378, "82378IB"), DEVICE( INTEL, INTEL_82430, "82430ZX Aries"), DEVICE( INTEL, INTEL_82434, "82434LX Mercury/Neptune"), DEVICE( INTEL, INTEL_I960, "i960"), DEVICE( INTEL, INTEL_I960RN, "i960 RN"), DEVICE( INTEL, INTEL_82559ER, "82559ER"), DEVICE( INTEL, INTEL_82092AA_0,"82092AA PCMCIA bridge"), DEVICE( INTEL, INTEL_82092AA_1,"82092AA EIDE"), DEVICE( INTEL, INTEL_7116, "SAA7116"), DEVICE( INTEL, INTEL_82596, "82596"), DEVICE( INTEL, INTEL_82865, "82865"), DEVICE( INTEL, INTEL_82557, "82557"), DEVICE( INTEL, INTEL_82437, "82437"), DEVICE( INTEL, INTEL_82371FB_0,"82371FB PIIX ISA"), DEVICE( INTEL, INTEL_82371FB_1,"82371FB PIIX IDE"), DEVICE( INTEL, INTEL_82371MX, "430MX - 82371MX MPIIX"), DEVICE( INTEL, INTEL_82437MX, "430MX - 82437MX MTSC"), DEVICE( INTEL, INTEL_82441, "82441FX Natoma"), DEVICE( INTEL, INTEL_82380FB, "82380FB Mobile"), DEVICE( INTEL, INTEL_82439, "82439HX Triton II"), DEVICE( INTEL, INTEL_MEGARAID, "OEM MegaRAID Controller"), DEVICE( 
INTEL, INTEL_82371SB_0,"82371SB PIIX3 ISA"), DEVICE( INTEL, INTEL_82371SB_1,"82371SB PIIX3 IDE"), DEVICE( INTEL, INTEL_82371SB_2,"82371SB PIIX3 USB"), DEVICE( INTEL, INTEL_82437VX, "82437VX Triton II"), DEVICE( INTEL, INTEL_82439TX, "82439TX"), DEVICE( INTEL, INTEL_82371AB_0,"82371AB PIIX4 ISA"), DEVICE( INTEL, INTEL_82371AB, "82371AB PIIX4 IDE"), DEVICE( INTEL, INTEL_82371AB_2,"82371AB PIIX4 USB"), DEVICE( INTEL, INTEL_82371AB_3,"82371AB PIIX4 ACPI"), DEVICE( INTEL, INTEL_82443LX_0,"440LX - 82443LX PAC Host"), DEVICE( INTEL, INTEL_82443LX_1,"440LX - 82443LX PAC AGP"), DEVICE( INTEL, INTEL_82443BX_0,"440BX - 82443BX Host"), DEVICE( INTEL, INTEL_82443BX_1,"440BX - 82443BX AGP"), DEVICE( INTEL, INTEL_82443BX_2,"440BX - 82443BX Host (no AGP)"), DEVICE( INTEL, INTEL_P6, "Orion P6"), DEVICE( INTEL, INTEL_82450GX, "450KX/GX [Orion] - 82454KX/GX PCI Bridge"), DEVICE( INTEL, INTEL_82453GX, "450KX/GX [Orion] - 82453KX/GX Memory Controller"), DEVICE( INTEL, INTEL_82451NX, "450NX - 82451NX Memory & I/O Controller"), DEVICE( INTEL, INTEL_82454NX, "450NX - 82454NX PCI Expander Bridge"), DEVICE( COMPUTONE, COMPUTONE_IP2EX, "Computone IntelliPort Plus"), DEVICE( KTI, KTI_ET32P2, "ET32P2"), DEVICE( ADAPTEC, ADAPTEC_7810, "AIC-7810 RAID"), DEVICE( ADAPTEC, ADAPTEC_7821, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_38602, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"), DEVICE( ADAPTEC, ADAPTEC_7855, "AIC-7855"), DEVICE( ADAPTEC, ADAPTEC_5800, "AIC-5800"), DEVICE( ADAPTEC, ADAPTEC_3860, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7860, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7861, "AIC-7861"), DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"), DEVICE( ADAPTEC, ADAPTEC_7871, "AIC-7871"), DEVICE( ADAPTEC, ADAPTEC_7872, "AIC-7872"), DEVICE( ADAPTEC, ADAPTEC_7873, "AIC-7873"), DEVICE( ADAPTEC, ADAPTEC_7874, "AIC-7874"), DEVICE( ADAPTEC, ADAPTEC_7895, "AIC-7895U"), DEVICE( ADAPTEC, ADAPTEC_7880, "AIC-7880U"), DEVICE( ADAPTEC, ADAPTEC_7881, "AIC-7881U"), DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"), 
DEVICE( ADAPTEC, ADAPTEC_7883, "AIC-7883U"), DEVICE( ADAPTEC, ADAPTEC_7884, "AIC-7884U"), DEVICE( ADAPTEC, ADAPTEC_7885, "AIC-7885U"), DEVICE( ADAPTEC, ADAPTEC_7886, "AIC-7886U"), DEVICE( ADAPTEC, ADAPTEC_7887, "AIC-7887U"), DEVICE( ADAPTEC, ADAPTEC_7888, "AIC-7888U"), DEVICE( ADAPTEC, ADAPTEC_1030, "ABA-1030 DVB receiver"), DEVICE( ADAPTEC2, ADAPTEC2_2940U2,"AHA-2940U2"), DEVICE( ADAPTEC2, ADAPTEC2_2930U2,"AHA-2930U2"), DEVICE( ADAPTEC2, ADAPTEC2_7890B, "AIC-7890/1"), DEVICE( ADAPTEC2, ADAPTEC2_7890, "AIC-7890/1"), DEVICE( ADAPTEC2, ADAPTEC2_3940U2,"AHA-3940U2"), DEVICE( ADAPTEC2, ADAPTEC2_3950U2D,"AHA-3950U2D"), DEVICE( ADAPTEC2, ADAPTEC2_7896, "AIC-7896/7"), DEVICE( ADAPTEC2, ADAPTEC2_7892A, "AIC-7892"), DEVICE( ADAPTEC2, ADAPTEC2_7892B, "AIC-7892"), DEVICE( ADAPTEC2, ADAPTEC2_7892D, "AIC-7892"), DEVICE( ADAPTEC2, ADAPTEC2_7892P, "AIC-7892"), DEVICE( ADAPTEC2, ADAPTEC2_7899A, "AIC-7899"), DEVICE( ADAPTEC2, ADAPTEC2_7899B, "AIC-7899"), DEVICE( ADAPTEC2, ADAPTEC2_7899D, "AIC-7899"), DEVICE( ADAPTEC2, ADAPTEC2_7899P, "AIC-7899"), DEVICE( ATRONICS, ATRONICS_2015, "IDE-2015PL"), DEVICE( TIGERJET, TIGERJET_300, "Tiger300 ISDN"), DEVICE( ARK, ARK_STING, "Stingray"), DEVICE( ARK, ARK_STINGARK, "Stingray ARK 2000PV"), DEVICE( ARK, ARK_2000MT, "2000MT") }; /* * device_info[] is sorted so we can use binary search */ static struct pci_dev_info * pci_lookup_dev(unsigned int vendor, unsigned int dev) { int min = 0, max = sizeof(dev_info)/sizeof(dev_info[0]) - 1; for ( ; ; ) { int i = (min + max) >> 1; long order; order = dev_info[i].vendor - (long) vendor; if (!order) order = dev_info[i].device - (long) dev; if (order < 0) { min = i + 1; if ( min > max ) return 0; continue; } if (order > 0) { max = i - 1; if ( min > max ) return 0; continue; } return & dev_info[ i ]; } } static const char * pci_strclass (unsigned int class, char *buf) { char *s; switch (class >> 8) { case PCI_CLASS_NOT_DEFINED: s = "Non-VGA device"; break; case PCI_CLASS_NOT_DEFINED_VGA: s = "VGA compatible 
device"; break; case PCI_CLASS_STORAGE_SCSI: s = "SCSI storage controller"; break; case PCI_CLASS_STORAGE_IDE: s = "IDE interface"; break; case PCI_CLASS_STORAGE_FLOPPY: s = "Floppy disk controller"; break; case PCI_CLASS_STORAGE_IPI: s = "IPI storage controller"; break; case PCI_CLASS_STORAGE_RAID: s = "RAID storage controller"; break; case PCI_CLASS_STORAGE_OTHER: s = "Unknown mass storage controller"; break; case PCI_CLASS_NETWORK_ETHERNET: s = "Ethernet controller"; break; case PCI_CLASS_NETWORK_TOKEN_RING: s = "Token ring network controller"; break; case PCI_CLASS_NETWORK_FDDI: s = "FDDI network controller"; break; case PCI_CLASS_NETWORK_ATM: s = "ATM network controller"; break; case PCI_CLASS_NETWORK_OTHER: s = "Network controller"; break; case PCI_CLASS_DISPLAY_VGA: s = "VGA compatible controller"; break; case PCI_CLASS_DISPLAY_XGA: s = "XGA compatible controller"; break; case PCI_CLASS_DISPLAY_OTHER: s = "Display controller"; break; case PCI_CLASS_MULTIMEDIA_VIDEO: s = "Multimedia video controller"; break; case PCI_CLASS_MULTIMEDIA_AUDIO: s = "Multimedia audio controller"; break; case PCI_CLASS_MULTIMEDIA_OTHER: s = "Multimedia controller"; break; case PCI_CLASS_MEMORY_RAM: s = "RAM memory"; break; case PCI_CLASS_MEMORY_FLASH: s = "FLASH memory"; break; case PCI_CLASS_MEMORY_OTHER: s = "Memory"; break; case PCI_CLASS_BRIDGE_HOST: s = "Host bridge"; break; case PCI_CLASS_BRIDGE_ISA: s = "ISA bridge"; break; case PCI_CLASS_BRIDGE_EISA: s = "EISA bridge"; break; case PCI_CLASS_BRIDGE_MC: s = "MicroChannel bridge"; break; case PCI_CLASS_BRIDGE_PCI: s = "PCI bridge"; break; case PCI_CLASS_BRIDGE_PCMCIA: s = "PCMCIA bridge"; break; case PCI_CLASS_BRIDGE_NUBUS: s = "NuBus bridge"; break; case PCI_CLASS_BRIDGE_CARDBUS: s = "CardBus bridge"; break; case PCI_CLASS_BRIDGE_OTHER: s = "Bridge"; break; case PCI_CLASS_COMMUNICATION_SERIAL: s = "Serial controller"; break; case PCI_CLASS_COMMUNICATION_PARALLEL: s = "Parallel controller"; break; case 
PCI_CLASS_COMMUNICATION_OTHER: s = "Communication controller"; break; case PCI_CLASS_SYSTEM_PIC: s = "PIC"; break; case PCI_CLASS_SYSTEM_DMA: s = "DMA controller"; break; case PCI_CLASS_SYSTEM_TIMER: s = "Timer"; break; case PCI_CLASS_SYSTEM_RTC: s = "RTC"; break; case PCI_CLASS_SYSTEM_OTHER: s = "System peripheral"; break; case PCI_CLASS_INPUT_KEYBOARD: s = "Keyboard controller"; break; case PCI_CLASS_INPUT_PEN: s = "Digitizer Pen"; break; case PCI_CLASS_INPUT_MOUSE: s = "Mouse controller"; break; case PCI_CLASS_INPUT_OTHER: s = "Input device controller"; break; case PCI_CLASS_DOCKING_GENERIC: s = "Generic Docking Station"; break; case PCI_CLASS_DOCKING_OTHER: s = "Docking Station"; break; case PCI_CLASS_PROCESSOR_386: s = "386"; break; case PCI_CLASS_PROCESSOR_486: s = "486"; break; case PCI_CLASS_PROCESSOR_PENTIUM: s = "Pentium"; break; case PCI_CLASS_PROCESSOR_ALPHA: s = "Alpha"; break; case PCI_CLASS_PROCESSOR_POWERPC: s = "Power PC"; break; case PCI_CLASS_PROCESSOR_CO: s = "Co-processor"; break; case PCI_CLASS_SERIAL_FIREWIRE: s = "FireWire (IEEE 1394)"; break; case PCI_CLASS_SERIAL_ACCESS: s = "ACCESS Bus"; break; case PCI_CLASS_SERIAL_SSA: s = "SSA"; break; case PCI_CLASS_SERIAL_USB: s = "USB Controller"; break; case PCI_CLASS_SERIAL_FIBER: s = "Fiber Channel"; break; case PCI_CLASS_SERIAL_SMBUS: s = "SM Bus"; break; case PCI_CLASS_HOT_SWAP_CONTROLLER: s = "Hot Swap Controller"; break; default: sprintf(buf, "[PCI_CLASS %x]", class); s = buf; break; } return s; } static const char * pci_strvendor(unsigned int vendor, char *buf) { char *s; switch (vendor) { case PCI_VENDOR_ID_COMPAQ: s = "Compaq"; break; case PCI_VENDOR_ID_NCR: s = "NCR"; break; case PCI_VENDOR_ID_ATI: s = "ATI"; break; case PCI_VENDOR_ID_VLSI: s = "VLSI"; break; case PCI_VENDOR_ID_ADL: s = "Avance Logic"; break; case PCI_VENDOR_ID_NS: s = "NS"; break; case PCI_VENDOR_ID_TSENG: s = "Tseng'Lab"; break; case PCI_VENDOR_ID_WEITEK: s = "Weitek"; break; case PCI_VENDOR_ID_DEC: s = "DEC"; break; 
case PCI_VENDOR_ID_CIRRUS: s = "Cirrus Logic"; break; case PCI_VENDOR_ID_IBM: s = "IBM"; break; case PCI_VENDOR_ID_WD: s = "Western Digital"; break; case PCI_VENDOR_ID_AMD: s = "AMD"; break; case PCI_VENDOR_ID_TRIDENT: s = "Trident"; break; case PCI_VENDOR_ID_AI: s = "Acer Incorporated"; break; case PCI_VENDOR_ID_MATROX: s = "Matrox"; break; case PCI_VENDOR_ID_CT: s = "Chips & Technologies"; break; case PCI_VENDOR_ID_MIRO: s = "Miro"; break; case PCI_VENDOR_ID_NEC: s = "NEC"; break; case PCI_VENDOR_ID_FD: s = "Future Domain"; break; case PCI_VENDOR_ID_SI: s = "Silicon Integrated Systems"; break; case PCI_VENDOR_ID_HP: s = "Hewlett Packard"; break; case PCI_VENDOR_ID_PCTECH: s = "PCTECH"; break; case PCI_VENDOR_ID_DPT: s = "DPT"; break; case PCI_VENDOR_ID_OPTI: s = "OPTi"; break; case PCI_VENDOR_ID_SGS: s = "SGS Thomson"; break; case PCI_VENDOR_ID_BUSLOGIC: s = "BusLogic"; break; case PCI_VENDOR_ID_TI: s = "Texas Instruments"; break; case PCI_VENDOR_ID_OAK: s = "OAK"; break; case PCI_VENDOR_ID_WINBOND2: s = "Winbond"; break; case PCI_VENDOR_ID_MOTOROLA: s = "Motorola"; break; case PCI_VENDOR_ID_MOTOROLA_OOPS: s = "Motorola"; break; case PCI_VENDOR_ID_PROMISE: s = "Promise Technology"; break; case PCI_VENDOR_ID_N9: s = "Number Nine"; break; case PCI_VENDOR_ID_UMC: s = "UMC"; break; case PCI_VENDOR_ID_X: s = "X TECHNOLOGY"; break; case PCI_VENDOR_ID_MYLEX: s = "Mylex"; break; case PCI_VENDOR_ID_PICOP: s = "PicoPower"; break; case PCI_VENDOR_ID_APPLE: s = "Apple"; break; case PCI_VENDOR_ID_NEXGEN: s = "Nexgen"; break; case PCI_VENDOR_ID_QLOGIC: s = "Q Logic"; break; case PCI_VENDOR_ID_CYRIX: s = "Cyrix"; break; case PCI_VENDOR_ID_LEADTEK: s = "Leadtek Research"; break; case PCI_VENDOR_ID_CONTAQ: s = "Contaq"; break; case PCI_VENDOR_ID_FOREX: s = "Forex"; break; case PCI_VENDOR_ID_OLICOM: s = "Olicom"; break; case PCI_VENDOR_ID_SUN: s = "Sun Microsystems"; break; case PCI_VENDOR_ID_CMD: s = "CMD"; break; case PCI_VENDOR_ID_VISION: s = "Vision"; break; case 
PCI_VENDOR_ID_BROOKTREE: s = "Brooktree"; break; case PCI_VENDOR_ID_SIERRA: s = "Sierra"; break; case PCI_VENDOR_ID_ACC: s = "ACC MICROELECTRONICS"; break; case PCI_VENDOR_ID_WINBOND: s = "Winbond"; break; case PCI_VENDOR_ID_DATABOOK: s = "Databook"; break; case PCI_VENDOR_ID_PLX: s = "PLX"; break; case PCI_VENDOR_ID_MADGE: s = "Madge Networks"; break; case PCI_VENDOR_ID_3COM: s = "3Com"; break; case PCI_VENDOR_ID_SMC: s = "SMC"; break; case PCI_VENDOR_ID_AL: s = "Acer Labs"; break; case PCI_VENDOR_ID_MITSUBISHI: s = "Mitsubishi"; break; case PCI_VENDOR_ID_SURECOM: s = "Surecom"; break; case PCI_VENDOR_ID_NEOMAGIC: s = "Neomagic"; break; case PCI_VENDOR_ID_ASP: s = "Advanced System Products"; break; case PCI_VENDOR_ID_MACRONIX: s = "Macronix"; break; case PCI_VENDOR_ID_CERN: s = "CERN"; break; case PCI_VENDOR_ID_NVIDIA: s = "NVidia"; break; case PCI_VENDOR_ID_IMS: s = "IMS"; break; case PCI_VENDOR_ID_TEKRAM2: s = "Tekram"; break; case PCI_VENDOR_ID_TUNDRA: s = "Tundra"; break; case PCI_VENDOR_ID_AMCC: s = "AMCC"; break; case PCI_VENDOR_ID_INTERG: s = "Intergraphics"; break; case PCI_VENDOR_ID_REALTEK: s = "Realtek"; break; case PCI_VENDOR_ID_TRUEVISION: s = "Truevision"; break; case PCI_VENDOR_ID_INIT: s = "Initio Corp"; break; case PCI_VENDOR_ID_TTI: s = "Triones Technologies, Inc."; break; case PCI_VENDOR_ID_VIA: s = "VIA Technologies"; break; case PCI_VENDOR_ID_SMC2: s = "SMC"; break; case PCI_VENDOR_ID_VORTEX: s = "VORTEX"; break; case PCI_VENDOR_ID_EF: s = "Efficient Networks"; break; case PCI_VENDOR_ID_FORE: s = "Fore Systems"; break; case PCI_VENDOR_ID_IMAGINGTECH: s = "Imaging Technology"; break; case PCI_VENDOR_ID_PHILIPS: s = "Philips"; break; case PCI_VENDOR_ID_CYCLONE: s = "Cyclone"; break; case PCI_VENDOR_ID_ALLIANCE: s = "Alliance"; break; case PCI_VENDOR_ID_VMIC: s = "VMIC"; break; case PCI_VENDOR_ID_DIGI: s = "Digi Intl."; break; case PCI_VENDOR_ID_MUTECH: s = "Mutech"; break; case PCI_VENDOR_ID_RENDITION: s = "Rendition"; break; case 
PCI_VENDOR_ID_TOSHIBA: s = "Toshiba"; break; case PCI_VENDOR_ID_RICOH: s = "Ricoh"; break; case PCI_VENDOR_ID_ARTOP: s = "Artop Electronics"; break; case PCI_VENDOR_ID_ZEITNET: s = "ZeitNet"; break; case PCI_VENDOR_ID_OMEGA: s = "Omega Micro"; break; case PCI_VENDOR_ID_LITEON: s = "LiteOn"; break; case PCI_VENDOR_ID_NP: s = "Network Peripherals"; break; case PCI_VENDOR_ID_ATT: s = "Lucent (ex-AT&T) Microelectronics"; break; case PCI_VENDOR_ID_SPECIALIX: s = "Specialix"; break; case PCI_VENDOR_ID_AURAVISION: s = "Auravision"; break; case PCI_VENDOR_ID_IKON: s = "Ikon"; break; case PCI_VENDOR_ID_ZORAN: s = "Zoran"; break; case PCI_VENDOR_ID_KINETIC: s = "Kinetic"; break; case PCI_VENDOR_ID_COMPEX: s = "Compex"; break; case PCI_VENDOR_ID_RP: s = "Comtrol"; break; case PCI_VENDOR_ID_CYCLADES: s = "Cyclades"; break; case PCI_VENDOR_ID_ESSENTIAL: s = "Essential Communications"; break; case PCI_VENDOR_ID_O2: s = "O2 Micro"; break; case PCI_VENDOR_ID_3DFX: s = "3Dfx"; break; case PCI_VENDOR_ID_SIGMADES: s = "Sigma Designs"; break; case PCI_VENDOR_ID_AVM: s = "AVM"; break; case PCI_VENDOR_ID_CCUBE: s = "C-Cube"; break; case PCI_VENDOR_ID_DIPIX: s = "Dipix"; break; case PCI_VENDOR_ID_STALLION: s = "Stallion Technologies"; break; case PCI_VENDOR_ID_OPTIBASE: s = "Optibase"; break; case PCI_VENDOR_ID_SATSAGEM: s = "SatSagem"; break; case PCI_VENDOR_ID_HUGHES: s = "Hughes"; break; case PCI_VENDOR_ID_ENSONIQ: s = "Ensoniq"; break; case PCI_VENDOR_ID_ALTEON: s = "Alteon"; break; case PCI_VENDOR_ID_PICTUREL: s = "Picture Elements"; break; case PCI_VENDOR_ID_NVIDIA_SGS: s = "NVidia/SGS Thomson"; break; case PCI_VENDOR_ID_CBOARDS: s = "ComputerBoards"; break; case PCI_VENDOR_ID_TIMEDIA: s = "Timedia Technology"; break; case PCI_VENDOR_ID_SYMPHONY: s = "Symphony"; break; case PCI_VENDOR_ID_COMPUTONE: s = "Computone Corporation"; break; case PCI_VENDOR_ID_TEKRAM: s = "Tekram"; break; case PCI_VENDOR_ID_3DLABS: s = "3Dlabs"; break; case PCI_VENDOR_ID_AVANCE: s = "Avance"; break; case 
PCI_VENDOR_ID_NETVIN: s = "NetVin"; break; case PCI_VENDOR_ID_S3: s = "S3 Inc."; break; case PCI_VENDOR_ID_DCI: s = "Decision Computer Int."; break; case PCI_VENDOR_ID_GENROCO: s = "Genroco"; break; case PCI_VENDOR_ID_INTEL: s = "Intel"; break; case PCI_VENDOR_ID_KTI: s = "KTI"; break; case PCI_VENDOR_ID_ADAPTEC: s = "Adaptec"; break; case PCI_VENDOR_ID_ADAPTEC2: s = "Adaptec"; break; case PCI_VENDOR_ID_ATRONICS: s = "Atronics"; break; case PCI_VENDOR_ID_TIGERJET: s = "TigerJet"; break; case PCI_VENDOR_ID_ARK: s = "ARK Logic"; break; case PCI_VENDOR_ID_SYSKONNECT: s = "SysKonnect"; break; default: sprintf(buf, "[PCI_VENDOR %x]", vendor); s = buf; break; } return s; } static const char * pci_strdev(unsigned int vendor, unsigned int device, char *buf) { struct pci_dev_info *info; if ((info = pci_lookup_dev(vendor, device))) return info->name; else { sprintf(buf, "[PCI_DEVICE %x]", device); return buf; } } /* * If the disk's name is started with these strings, we will skip it and do not * display its statistics. */ static char *skipped_disk_name[] = { "ram", "loop", NULL }; static int is_skipped_disk(char *name) { char **p = skipped_disk_name; while (*p) { if (strncmp(name, *p, strlen(*p)) == 0) return TRUE; p++; } return FALSE; } struct diskio { int read; int write; }; struct iter { /* If the kernel uses klist, the address should be klist.k_list */ long head_address; long current_address; long type_address; /* the address of symbol "disk_type" */ /* * If it is true, it means request_list.count[2] contains async/sync * requests. */ int sync_count; int diskname_len; unsigned long (*next_disk)(struct iter *); /* * The argument is the address of request_queue, and the function * returns the total requests in the driver(not ended) */ unsigned int (*get_in_flight)(unsigned long); /* * this function reads request_list.count[2], and the first argument * is the address of request_queue. 
*/ void (*get_diskio)(unsigned long , struct diskio *); /* * check if device.type == &disk_type * * old kernel(version <= 2.6.24) does not have the symbol "disk_type", * and this callback should be null. */ int (*match)(struct iter *, unsigned long); /* * If the kernel uses list, the argument is the address of list_head, * otherwise, the argument is the address of klist_node. */ unsigned long (*get_gendisk)(unsigned long); }; /* kernel version <= 2.6.24 */ static unsigned long get_gendisk_1(unsigned long entry) { return entry - OFFSET(kobject_entry) - OFFSET(gendisk_kobj); } /* 2.6.24 < kernel version <= 2.6.27 */ static unsigned long get_gendisk_2(unsigned long entry) { return entry - OFFSET(device_node) - OFFSET(gendisk_dev); } /* kernel version > 2.6.27 && struct gendisk contains dev/__dev */ static unsigned long get_gendisk_3(unsigned long entry) { return entry - OFFSET(device_knode_class) - OFFSET(gendisk_dev); } /* kernel version > 2.6.27 && struct gendisk does not contain dev/__dev */ static unsigned long get_gendisk_4(unsigned long entry) { return entry - OFFSET(device_knode_class) - OFFSET(hd_struct_dev) - OFFSET(gendisk_part0); } /* 2.6.24 < kernel version <= 2.6.27 */ static int match_list(struct iter *i, unsigned long entry) { unsigned long device_address; unsigned long device_type; device_address = entry - OFFSET(device_node); readmem(device_address + OFFSET(device_type), KVADDR, &device_type, sizeof(device_type), "device.type", FAULT_ON_ERROR); if (device_type != i->type_address) return FALSE; return TRUE; } /* kernel version > 2.6.27 */ static int match_klist(struct iter *i, unsigned long entry) { unsigned long device_address; unsigned long device_type; device_address = entry - OFFSET(device_knode_class); readmem(device_address + OFFSET(device_type), KVADDR, &device_type, sizeof(device_type), "device.type", FAULT_ON_ERROR); if (device_type != i->type_address) return FALSE; return TRUE; } /* old kernel(version <= 2.6.27): list */ static unsigned long 
next_disk_list(struct iter *i) { unsigned long list_head_address, next_address; if (i->current_address) { list_head_address = i->current_address; } else { list_head_address = i->head_address; } again: /* read list_head.next */ readmem(list_head_address + OFFSET(list_head_next), KVADDR, &next_address, sizeof(next_address), "list_head.next", FAULT_ON_ERROR); if (next_address == i->head_address) return 0; if (i->match && !i->match(i, next_address)) { list_head_address = next_address; goto again; } i->current_address = next_address; return i->get_gendisk(next_address); } /* new kernel(version > 2.6.27): klist */ static unsigned long next_disk_klist(struct iter* i) { unsigned long klist_node_address, list_head_address, next_address; unsigned long n_klist; if (i->current_address) { list_head_address = i->current_address; } else { list_head_address = i->head_address; } again: /* read list_head.next */ readmem(list_head_address + OFFSET(list_head_next), KVADDR, &next_address, sizeof(next_address), "list_head.next", FAULT_ON_ERROR); /* skip dead klist_node */ while(next_address != i->head_address) { klist_node_address = next_address - OFFSET(klist_node_n_node); readmem(klist_node_address + OFFSET(klist_node_n_klist), KVADDR, &n_klist, sizeof(n_klist), "klist_node.n_klist", FAULT_ON_ERROR); if (!(n_klist & 1)) break; /* the klist_node is dead, skip to next klist_node */ readmem(next_address + OFFSET(list_head_next), KVADDR, &next_address, sizeof(next_address), "list_head.next", FAULT_ON_ERROR); } if (next_address == i->head_address) return 0; if (i->match && !i->match(i, klist_node_address)) { list_head_address = next_address; goto again; } i->current_address = next_address; return i->get_gendisk(klist_node_address); } /* read request_queue.rq.count[2] */ static void get_diskio_1(unsigned long rq, struct diskio *io) { int count[2]; readmem(rq + OFFSET(request_queue_rq) + OFFSET(request_list_count), KVADDR, count, sizeof(int) * 2, "request_list.count", FAULT_ON_ERROR); 
io->read = count[0]; io->write = count[1]; } /* request_queue.in_flight contains total requests */ static unsigned int get_in_flight_1(unsigned long rq) { unsigned int in_flight; readmem(rq+ OFFSET(request_queue_in_flight), KVADDR, &in_flight, sizeof(uint), "request_queue.in_flight", FAULT_ON_ERROR); return in_flight; } /* request_queue.in_flight[2] contains read/write requests */ static unsigned int get_in_flight_2(unsigned long rq) { unsigned int in_flight[2]; readmem(rq+ OFFSET(request_queue_in_flight), KVADDR, in_flight, sizeof(uint) * 2, "request_queue.in_flight", FAULT_ON_ERROR); return in_flight[0] + in_flight[1]; } static void init_iter(struct iter *i) { ARRAY_LENGTH_INIT(i->diskname_len, gendisk.disk_name, "gendisk.disk_name", NULL, sizeof(char)); if (i->diskname_len < 0 || i->diskname_len > BUFSIZE) { option_not_supported('d'); return; } i->current_address = 0; /* check whether BLK_RW_SYNC exists */ i->sync_count = get_symbol_type("BLK_RW_SYNC", NULL, NULL) == TYPE_CODE_ENUM; if (SIZE(rq_in_flight) == sizeof(int)) { i->get_in_flight = get_in_flight_1; } else if (SIZE(rq_in_flight) == sizeof(int) * 2) { i->get_in_flight = get_in_flight_2; } else { option_not_supported('d'); return; } i->get_diskio = get_diskio_1; if (symbol_exists("block_subsys") || symbol_exists("block_kset")) { /* kernel version <= 2.6.24 */ unsigned long block_subsys_addr; if (symbol_exists("block_subsys")) block_subsys_addr = symbol_value("block_subsys"); else block_subsys_addr = symbol_value("block_kset"); if (VALID_STRUCT(subsystem)) i->head_address = block_subsys_addr + OFFSET(subsystem_kset) + OFFSET(kset_list); else i->head_address = block_subsys_addr + OFFSET(kset_list); i->type_address = 0; i->next_disk = next_disk_list; i->match = NULL; i->get_gendisk = get_gendisk_1; } else if (symbol_exists("block_class")) { unsigned long block_class_addr = symbol_value("block_class"); i->type_address = symbol_value("disk_type"); if (VALID_MEMBER(class_devices) || 
(VALID_MEMBER(class_private_devices) && SIZE(class_private_devices) == SIZE(list_head))) { /* 2.6.24 < kernel version <= 2.6.27, list */ if (!VALID_STRUCT(class_private)) { /* 2.6.24 < kernel version <= 2.6.26 */ i->head_address = block_class_addr + OFFSET(class_devices); } else { /* kernel version is 2.6.27 */ unsigned long class_private_addr; readmem(block_class_addr + OFFSET(class_p), KVADDR, &class_private_addr, sizeof(class_private_addr), "class.p", FAULT_ON_ERROR); i->head_address = class_private_addr + OFFSET(class_private_devices); } i->next_disk = next_disk_list; i->match = match_list; i->get_gendisk = get_gendisk_2; } else { /* kernel version > 2.6.27, klist */ unsigned long class_private_addr; readmem(block_class_addr + OFFSET(class_p), KVADDR, &class_private_addr, sizeof(class_private_addr), "class.p", FAULT_ON_ERROR); if (VALID_STRUCT(class_private)) { /* 2.6.27 < kernel version <= 2.6.37-rc2 */ i->head_address = class_private_addr + OFFSET(class_private_devices); } else { /* kernel version > 2.6.37-rc2 */ i->head_address = class_private_addr + OFFSET(subsys_private_klist_devices); } i->head_address += OFFSET(klist_k_list); i->next_disk = next_disk_klist; i->match = match_klist; if (VALID_MEMBER(gendisk_dev)) i->get_gendisk = get_gendisk_3; else i->get_gendisk = get_gendisk_4; } } else { option_not_supported('d'); return; } } static void display_one_diskio(struct iter *i, unsigned long gendisk) { char disk_name[BUFSIZE + 1]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; int major; unsigned long queue_addr; unsigned int in_flight; struct diskio io; memset(disk_name, 0, BUFSIZE + 1); readmem(gendisk + OFFSET(gendisk_disk_name), KVADDR, disk_name, i->diskname_len, "gen_disk.disk_name", FAULT_ON_ERROR); if (is_skipped_disk(disk_name)) return; readmem(gendisk + OFFSET(gendisk_queue), KVADDR, &queue_addr, sizeof(ulong), "gen_disk.queue", FAULT_ON_ERROR); readmem(gendisk + 
OFFSET(gendisk_major), KVADDR, &major, sizeof(int), "gen_disk.major", FAULT_ON_ERROR); i->get_diskio(queue_addr, &io); in_flight = i->get_in_flight(queue_addr); fprintf(fp, "%s%s%s %s%s%s%s %s%5d%s%s%s%s%s%5u\n", mkstring(buf0, 5, RJUST|INT_DEC, (char *)(unsigned long)major), space(MINSPACE), mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, (char *)gendisk), space(MINSPACE), mkstring(buf2, 10, LJUST, disk_name), space(MINSPACE), mkstring(buf3, VADDR_PRLEN <= 11 ? 11 : VADDR_PRLEN, LJUST|LONG_HEX, (char *)queue_addr), space(MINSPACE), io.read + io.write, space(MINSPACE), mkstring(buf4, 5, RJUST|INT_DEC, (char *)(unsigned long)io.read), space(MINSPACE), mkstring(buf5, 5, RJUST|INT_DEC, (char *)(unsigned long)io.write), space(MINSPACE), in_flight); } static void display_all_diskio(void) { struct iter i; unsigned long gendisk; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; init_iter(&i); fprintf(fp, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", "MAJOR", space(MINSPACE), mkstring(buf0, VADDR_PRLEN + 2, LJUST, "GENDISK"), space(MINSPACE), "NAME ", space(MINSPACE), mkstring(buf1, VADDR_PRLEN <= 11 ? 13 : VADDR_PRLEN + 2, LJUST, "REQUEST_QUEUE"), space(MINSPACE), mkstring(buf2, 5, RJUST, "TOTAL"), space(MINSPACE), i.sync_count ? mkstring(buf3, 5, RJUST, "ASYNC") : mkstring(buf3, 5, RJUST, "READ"), space(MINSPACE), i.sync_count ? 
mkstring(buf4, 5, RJUST, "SYNC") : mkstring(buf4, 5, RJUST, "WRITE"), space(MINSPACE), mkstring(buf5, 5, RJUST, "DRV")); while ((gendisk = i.next_disk(&i)) != 0) display_one_diskio(&i, gendisk); } static void diskio_init(void) { if (dt->flags & DISKIO_INIT) return; MEMBER_OFFSET_INIT(class_devices, "class", "class_devices"); if (INVALID_MEMBER(class_devices)) MEMBER_OFFSET_INIT(class_devices, "class", "devices"); MEMBER_OFFSET_INIT(class_p, "class", "p"); MEMBER_OFFSET_INIT(class_private_devices, "class_private", "class_devices"); MEMBER_OFFSET_INIT(device_knode_class, "device", "knode_class"); MEMBER_OFFSET_INIT(device_node, "device", "node"); MEMBER_OFFSET_INIT(device_type, "device", "type"); MEMBER_OFFSET_INIT(gendisk_dev, "gendisk", "dev"); if (INVALID_MEMBER(gendisk_dev)) MEMBER_OFFSET_INIT(gendisk_dev, "gendisk", "__dev"); MEMBER_OFFSET_INIT(gendisk_kobj, "gendisk", "kobj"); MEMBER_OFFSET_INIT(gendisk_part0, "gendisk", "part0"); MEMBER_OFFSET_INIT(gendisk_queue, "gendisk", "queue"); MEMBER_OFFSET_INIT(hd_struct_dev, "hd_struct", "__dev"); MEMBER_OFFSET_INIT(klist_k_list, "klist", "k_list"); MEMBER_OFFSET_INIT(klist_node_n_klist, "klist_node", "n_klist"); MEMBER_OFFSET_INIT(klist_node_n_node, "klist_node", "n_node"); MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); MEMBER_OFFSET_INIT(kset_list, "kset", "list"); MEMBER_OFFSET_INIT(request_list_count, "request_list", "count"); MEMBER_OFFSET_INIT(request_queue_in_flight, "request_queue", "in_flight"); if (MEMBER_EXISTS("request_queue", "rq")) MEMBER_OFFSET_INIT(request_queue_rq, "request_queue", "rq"); else MEMBER_OFFSET_INIT(request_queue_rq, "request_queue", "root_rl"); MEMBER_OFFSET_INIT(subsys_private_klist_devices, "subsys_private", "klist_devices"); MEMBER_OFFSET_INIT(subsystem_kset, "subsystem", "kset"); STRUCT_SIZE_INIT(subsystem, "subsystem"); STRUCT_SIZE_INIT(class_private, "class_private"); MEMBER_SIZE_INIT(rq_in_flight, "request_queue", "in_flight"); MEMBER_SIZE_INIT(class_private_devices, 
"class_private", "class_devices"); dt->flags |= DISKIO_INIT; } static void diskio_option(void) { diskio_init(); display_all_diskio(); } crash-7.1.4/lkcd_dump_v7.h0000664000000000000000000003353712634305150014064 0ustar rootroot/* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net) * Copyright 2001 Matt D. Robinson. All rights reserved. * * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * referencing them in the dump_sgi_environment() helper routines. 
*/ /* necessary header files */ #include /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) 
*/ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ void *dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. 
*/ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #ifdef IA64 #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_t; #define NR_CPUS 32 typedef struct _dump_header_asm_smp_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_smp_t; #endif #ifdef __KERNEL__ /* * Structure: dump_compress_t * Function: This is what an individual compression mechanism can use * to plug in their own compression techniques. It's always * best to build these as individual modules so that people * can put in whatever they want. 
*/ typedef struct dump_compress_s { /* the list_head structure for list storage */ struct list_head list; /* the type of compression to use (DUMP_COMPRESS_XXX) */ int compress_type; /* the compression function to call */ int (*compress_func)(char *, int, char *, int); } dump_compress_t; extern int dump_init(void); extern void dump_execute(char *, struct pt_regs *); extern int page_is_ram(unsigned long); #endif /* __KERNEL__ */ #endif /* _DUMP_H */ crash-7.1.4/qemu.c0000664000000000000000000001773712634305150012454 0ustar rootroot/* * Derive kernel base from a QEMU saved VM file * * Copyright (C) 2009, 2010 Red Hat, Inc. * Written by Paolo Bonzini. * * Portions Copyright (C) 2009 David Anderson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include #include #include #include #include "qemu-load.h" #include "kvmdump.h" /* * Some bits we need to access in the control registers and page tables. 
*/ #define MSR_EFER_LMA (1 << 10) #define PG_PRESENT_MASK (1 << 0) #define PG_PSE_MASK (1 << 7) #define CR0_PG_MASK (1 << 31) #define CR4_PAE_MASK (1 << 31) #define CR4_PSE_MASK (1 << 31) static uint32_t ldl (struct qemu_device_x86 *dx86, struct qemu_device_ram *dram, uint64_t addr) { char buf[4096]; if (dx86->a20_masked) addr &= ~(1LL<<20); if (!ram_read_phys_page (dram, buf, addr & ~0xfff)) return 0; assert ((addr & 0xfff) <= 0xffc); return *(uint32_t *)(buf + (addr & 0xfff)); } static uint64_t ldq (struct qemu_device_x86 *dx86, struct qemu_device_ram *dram, uint64_t addr) { char buf[4096]; if (dx86->a20_masked) addr &= ~(1LL<<20); if (!ram_read_phys_page (dram, buf, addr & ~0xfff)) return 0; assert ((addr & 0xfff) <= 0xff8); return *(uint64_t *)(buf + (addr & 0xfff)); } /* * Messy x86 TLB fault logic, walking the page tables to find the physical * address corresponding to ADDR. Taken from QEMU. */ static uint64_t get_phys_page(struct qemu_device_x86 *dx86, struct qemu_device_ram *dram, uint64_t addr) { uint64_t pde_addr, pte_addr; uint64_t pte, paddr; uint32_t page_offset; int page_size; if ((dx86->cr4 & CR4_PAE_MASK) || (dx86->efer & MSR_EFER_LMA)) { uint64_t pdpe_addr; uint64_t pde, pdpe; if (dx86->cr4 & CR4_PAE_MASK) dprintf ("PAE active\n"); if (dx86->efer & MSR_EFER_LMA) { uint64_t pml4e_addr, pml4e; int32_t sext; dprintf ("long mode active\n"); /* test virtual address sign extension */ sext = (int64_t) addr >> 47; if (sext != 0 && sext != -1) return -1; pml4e_addr = ((dx86->cr3 & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)); pml4e = ldq (dx86, dram, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) return -1; dprintf ("PML4 page present\n"); pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)); pdpe = ldq (dx86, dram, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; dprintf ("PDPE page present\n"); } else { dprintf ("long mode inactive\n"); pdpe_addr = ((dx86->cr3 & ~0x1f) + ((addr >> 27) & 0x18)); pdpe = ldq (dx86, dram, pdpe_addr); if (!(pdpe & 
PG_PRESENT_MASK)) return -1; dprintf ("PDPE page present\n"); } pde_addr = (pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3); pde = ldq (dx86, dram, pde_addr); if (!(pde & PG_PRESENT_MASK)) return -1; dprintf ("PDE page present\n"); if (pde & PG_PSE_MASK) { /* 2 MB page */ dprintf ("2MB page\n"); page_size = 2048 * 1024; pte = pde & ~((page_size - 1) & ~0xfff); } else { /* 4 KB page */ dprintf ("4 KB PAE page\n"); pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)); page_size = 4096; pte = ldq (dx86, dram, pte_addr); if (!(pte & PG_PRESENT_MASK)) return -1; dprintf ("PTE page present\n"); } } else { /* Not PAE. */ uint32_t pde; if (!(dx86->cr0 & CR0_PG_MASK)) { dprintf ("Paging inactive\n"); pte = addr; page_size = 4096; } else { /* page directory entry */ pde_addr = ((dx86->cr3 & ~0xfff) + ((addr >> 20) & 0xffc)); pde = ldl (dx86, dram, pde_addr); if (!(pde & PG_PRESENT_MASK)) return -1; dprintf ("PDE page present\n"); if ((pde & PG_PSE_MASK) && (dx86->cr4 & CR4_PSE_MASK)) { page_size = 4096 * 1024; pte = pde & ~((page_size - 1) & ~0xfff); } else { page_size = 4096; pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)); pte = ldl (dx86, dram, pte_addr); if (!(pte & PG_PRESENT_MASK)) return -1; dprintf ("PTE page present\n"); } } } page_offset = (addr & 0xfff) & (page_size - 1); paddr = (pte & ~0xfff) + page_offset; return paddr; } /* * I'm using the IDT base as a quick way to find the bottom of the * kernel memory. 
*/ static uint64_t get_idt_base(struct qemu_device_list *dl) { struct qemu_device_x86 *dx86 = (struct qemu_device_x86 *) device_find_instance (dl, "cpu", 0); return dx86->idt.base; } static uint64_t get_kernel_base(struct qemu_device_list *dl) { int i; uint64_t kernel_base = -1; uint64_t base_vaddr, last, mask; struct qemu_device_x86 *dx86 = (struct qemu_device_x86 *) device_find_instance (dl, "cpu", 0); struct qemu_device_ram *dram = (struct qemu_device_ram *) device_find_instance (dl, "ram", 0); for (i = 30, last = -1; (kernel_base == -1) && (i >= 20); i--) { mask = ~((1LL << i) - 1); base_vaddr = dx86->idt.base & mask; if (base_vaddr == last) continue; if (base_vaddr < kvm->kvbase) { fprintf(stderr, "WARNING: IDT base contains: %llx\n " "cannot determine physical base address: defaulting to 0\n\n", (unsigned long long)base_vaddr); return 0; } dprintf("get_kernel_base: %llx\n", (unsigned long long)base_vaddr); kernel_base = get_phys_page(dx86, dram, base_vaddr); last = base_vaddr; } if (kernel_base != -1) { dprintf("kvbase: %llx vaddr used: %llx physical: %llx\n", (unsigned long long)kvm->kvbase, (unsigned long long)base_vaddr, (unsigned long long)kernel_base); /* * Subtract the offset between the virtual address used * and the kernel's base virtual address. 
*/ kernel_base -= (base_vaddr - kvm->kvbase); } else { dprintf("WARNING: cannot determine physical base address:" " defaulting to 0\n\n"); kernel_base = 0; kvm->flags |= NO_PHYS_BASE; } return kernel_base; } #ifdef MAIN_FROM_TEST_C int main (int argc, char **argv) { struct qemu_device_list *dl; FILE *fp; if (argc != 2) { fprintf (stderr, "Usage: test SAVE-FILE\n"); exit (1); } fp = fopen(argv[1], "r"); if (!fp) { fprintf (stderr, "Error: %s\n", strerror (errno)); exit (1); } #ifdef HOST_32BIT dl = qemu_load (devices_x86_32, QEMU_FEATURE_CPU|QEMU_FEATURE_RAM, fp); #else dl = qemu_load (devices_x86_64, QEMU_FEATURE_CPU|QEMU_FEATURE_RAM, fp); #endif printf ("IDT at %llx\n", get_idt_base (dl)); printf ("Physical kernel base at %llx\n", get_kernel_base (dl)); device_list_free (dl); fclose (fp); exit (0); } #endif /* * crash utility adaptation */ #include "defs.h" int qemu_init(char *filename) { struct qemu_device_list *dl; struct qemu_device_ram *dram; uint64_t idt = 0; if (CRASHDEBUG(1)) dump_qemu_header(kvm->ofp); rewind(kvm->vmp); if (kvm->flags & (MAPFILE|MAPFILE_APPENDED)) return TRUE; please_wait("scanning KVM dumpfile"); if (kvm->flags & KVMHOST_32) dl = qemu_load(devices_x86_32, QEMU_FEATURE_CPU|QEMU_FEATURE_RAM, kvm->vmp); else dl = qemu_load(devices_x86_64, QEMU_FEATURE_CPU|QEMU_FEATURE_RAM, kvm->vmp); please_wait_done(); if (dl) { if (machine_type("X86_64")) { idt = get_idt_base(dl); kvm->mapinfo.phys_base = get_kernel_base(dl); } dram = (struct qemu_device_ram *) device_find_instance (dl, "ram", 0); if (CRASHDEBUG(1)) { if (machine_type("X86_64")) { fprintf(kvm->ofp, "IDT: %llx\n", (ulonglong)idt); fprintf(kvm->ofp, "physical kernel base: %llx\n", (ulonglong)kvm->mapinfo.phys_base); } fprintf(kvm->ofp, "last RAM offset: %llx\n", (ulonglong)dram->last_ram_offset); } device_list_free (dl); } else fclose(kvm->vmp); return dl ? 
TRUE : FALSE; } crash-7.1.4/unwind_x86_64.h0000664000000000000000000000624712634305150014026 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define CONFIG_64BIT 1 #define NULL ((void *)0) typedef unsigned long size_t; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef unsigned long long u64; struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; struct unwind_frame_info { struct pt_regs regs; }; extern int unwind(struct unwind_frame_info *, int); extern void init_unwind_table(void); extern void free_unwind_table(void); #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define get_unaligned(ptr) (*(ptr)) //#define __get_user(x,ptr) 
__get_user_nocheck((x),(ptr),sizeof(*(ptr))) #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define UNW_PC(frame) (frame)->regs.rip #define UNW_SP(frame) (frame)->regs.rsp #ifdef CONFIG_FRAME_POINTER #define UNW_FP(frame) (frame)->regs.rbp #define FRAME_RETADDR_OFFSET 8 #define FRAME_LINK_OFFSET 0 #define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) #define STACK_TOP(tsk) ((tsk)->thread.rsp0) #endif #define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) } #define PTREGS_INFO(f) EXTRA_INFO(regs.f) #define UNW_REGISTER_INFO \ PTREGS_INFO(rax),\ PTREGS_INFO(rdx),\ PTREGS_INFO(rcx),\ PTREGS_INFO(rbx), \ PTREGS_INFO(rsi), \ PTREGS_INFO(rdi), \ PTREGS_INFO(rbp), \ PTREGS_INFO(rsp), \ PTREGS_INFO(r8), \ PTREGS_INFO(r9), \ PTREGS_INFO(r10),\ PTREGS_INFO(r11), \ PTREGS_INFO(r12), \ PTREGS_INFO(r13), \ PTREGS_INFO(r14), \ PTREGS_INFO(r15), \ PTREGS_INFO(rip) crash-7.1.4/lkcd_fix_mem.c0000664000000000000000000000464412634305150014117 0ustar rootroot/* lkcd_fix_mem.c * * Copyright (C) 2004 Hewlett-Packard Development Company, L.P. * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifdef IA64 #define LKCD_COMMON #include "defs.h" #include "lkcd_dump_v8.h" static int fix_addr(dump_header_asm_t *); int fix_addr_v8(dump_header_asm_t *dha) { fix_addr(dha); return 0; } int fix_addr_v7(int fd) { static dump_header_asm_t dump_header_asm_v7 = { 0 }; dump_header_asm_t *dha; dha = &dump_header_asm_v7; if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) != sizeof(dump_header_asm_t)) return -1; fix_addr(dha); return 0; } static int fix_addr(dump_header_asm_t *dha) { lkcd->dump_header_asm = dha; if (dha->dha_magic_number == DUMP_ASM_MAGIC_NUMBER && dha->dha_version > 3) { int num; int i = 0; num = dha->dha_smp_num_cpus; lkcd->fix_addr_num = 0; if (num && (lkcd->fix_addr = malloc(num * sizeof(struct fix_addrs)))) { while (i < num) { if (dha->dha_stack[i] && dha->dha_smp_current_task[i]) { lkcd->fix_addr[i].task = (ulong)dha->dha_smp_current_task[i]; lkcd->fix_addr[i].saddr = (ulong)dha->dha_stack[i]; lkcd->fix_addr[i].sw = (ulong)dha->dha_stack_ptr[i]; /* remember the highest non-zero entry */ lkcd->fix_addr_num = i + 1; } else { lkcd->fix_addr[i].task = (ulong)0; } i++; } } } return 0; } ulong get_lkcd_switch_stack(ulong task) { int i; if (lkcd->fix_addr_num == 0) return 0; for (i = 0; i < lkcd->fix_addr_num; i++) { if (task == lkcd->fix_addr[i].task) { return lkcd->fix_addr[i].sw; } } return 0; } int lkcd_get_kernel_start_v8(ulong *addr) { if (!addr) return 0; *addr = ((dump_header_asm_t *)lkcd->dump_header_asm)->dha_kernel_addr; return 1; } #endif // IA64 crash-7.1.4/vmware_vmss.c0000664000000000000000000002171612634305150014046 0ustar rootroot/* * vmware_vmss.c * * Copyright (c) 2015 VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Dyno Hongjun Fu */ #include "defs.h" #include "vmware_vmss.h" #define LOGPRX "vmw: " /* VMware only supports X86/X86_64 virtual machines. */ #define VMW_PAGE_SIZE (4096) #define VMW_PAGE_SHIFT (12) static vmssdata vmss = { 0 }; int is_vmware_vmss(char *filename) { struct cptdumpheader hdr; FILE *fp; if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n", filename, errno, strerror(errno)); return FALSE; } if (fread(&hdr, sizeof(cptdumpheader), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s': [Error %d] %s\n", filename, errno, strerror(errno)); fclose(fp); return FALSE; } fclose(fp); if (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER && hdr.id != CPTDUMP_MAGIC_NUMBER && hdr.id != CPTDUMP_PARTIAL_MAGIC_NUMBER && hdr.id != CPTDUMP_RESTORED_MAGIC_NUMBER && hdr.id != CPTDUMP_NORESTORE_MAGIC_NUMBER) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Unrecognized .vmss file (magic %x).\n", hdr.id); return FALSE; } return TRUE; } int vmware_vmss_init(char *filename, FILE *ofp) { cptdumpheader hdr; cptgroupdesc *grps = NULL; unsigned grpsize; unsigned i; FILE *fp = NULL; int result = TRUE; if (!machine_type("X86") && !machine_type("X86_64")) { error(INFO, LOGPRX"Invalid or unsupported host architecture for .vmss file: %s\n", MACHINE_TYPE); result = FALSE; goto exit; } if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': %s [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } if (fread(&hdr, sizeof(cptdumpheader), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s': %s [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } DEBUG_PARSE_PRINT((ofp, LOGPRX"Header: id=%x version=%d numgroups=%d\n", hdr.id, 
hdr.version, hdr.numgroups)); vmss.cpt64bit = (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER); DEBUG_PARSE_PRINT((ofp, LOGPRX"Checkpoint is %d-bit\n", vmss.cpt64bit ? 64 : 32)); if (!vmss.cpt64bit) { error(INFO, LOGPRX"Not implemented for 32-bit VMSS file!\n"); result = FALSE; goto exit; } grpsize = hdr.numgroups * sizeof (cptgroupdesc); grps = (cptgroupdesc *) malloc(grpsize * sizeof(cptgroupdesc)); if (grps == NULL) { error(INFO, LOGPRX"Failed to allocate memory! [Error %d] %s\n", errno, strerror(errno)); result = FALSE; goto exit; } if (fread(grps, sizeof(cptgroupdesc), grpsize, fp) != grpsize) { error(INFO, LOGPRX"Failed to read '%s': [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } for (i = 0; i < hdr.numgroups; i++) { if (fseek(fp, grps[i].position, SEEK_SET) == -1) { error(INFO, LOGPRX"Bad offset of VMSS Group['%s'] in '%s' at %#llx.\n", grps[i].name, filename, (ulonglong)grps[i].position); continue; } DEBUG_PARSE_PRINT((ofp, LOGPRX"Group: %-20s offset=%#llx size=0x%#llx.\n", grps[i].name, (ulonglong)grps[i].position, (ulonglong)grps[i].size)); if (strcmp(grps[i].name, "memory") != 0) { continue; } for (;;) { uint16_t tag; char name[TAG_NAMELEN_MASK + 1]; unsigned nameLen; unsigned nindx; int idx[3]; unsigned j; int nextgroup = FALSE; if (fread(&tag, sizeof(tag), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read tag.\n"); break; } if (tag == NULL_TAG) break; nameLen = TAG_NAMELEN(tag); if (fread(name, nameLen, 1, fp) != 1) { error(INFO, LOGPRX"Cannot read tag name.\n"); break; } name[nameLen] = 0; DEBUG_PARSE_PRINT((ofp, LOGPRX"\t Item %20s", name)); nindx = TAG_NINDX(tag); if (nindx > 3) { error(INFO, LOGPRX"Too many indexes %d (> 3).\n", nindx); break; } idx[0] = idx[1] = idx[2] = NO_INDEX; for (j= 0; j < nindx; j++) { if (fread(&idx[j], sizeof(idx[0]), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read index.\n"); nextgroup = TRUE; break; } DEBUG_PARSE_PRINT((ofp, "[%d]", idx[j])); } if (nextgroup) break; if (IS_BLOCK_TAG(tag)) { uint64_t 
nbytes; uint64_t blockpos; uint64_t nbytesinmem; int compressed = IS_BLOCK_COMPRESSED_TAG(tag); uint16_t padsize; if (fread(&nbytes, sizeof(nbytes), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block size.\n"); break; } if (fread(&nbytesinmem, sizeof(nbytesinmem), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block memory size.\n"); break; } if (fread(&padsize, sizeof(padsize), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block padding size.\n"); break; } if ((blockpos = ftell(fp)) == -1) { error(INFO, LOGPRX"Cannot determine location within VMSS file.\n"); break; } blockpos += padsize; if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) { error(INFO, LOGPRX"Cannot seek past block at %#llx.\n", (ulonglong)(blockpos + nbytes)); break; } if (strcmp(name, "Memory") == 0) { /* The things that we really care about...*/ vmss.memoffset = blockpos; vmss.memsize = nbytesinmem; DEBUG_PARSE_PRINT((ofp, "\t=> %sBLOCK: position=%#llx size=%#llx memsize=%#llx\n", compressed ? "COMPRESSED " : "", (ulonglong)blockpos, (ulonglong)nbytes, (ulonglong)nbytesinmem)); if (compressed) { error(INFO, LOGPRX"Cannot handle compressed memory dump yet!\n"); result = FALSE; goto exit; } } } else { union { uint8_t val[TAG_VALSIZE_MASK]; uint32_t val32; } u; unsigned k; unsigned valsize = TAG_VALSIZE(tag); uint64_t blockpos = ftell(fp); DEBUG_PARSE_PRINT((ofp, "\t=> position=%#llx size=%#x: ", (ulonglong)blockpos, valsize)); if (fread(u.val, sizeof(u.val[0]), valsize, fp) != valsize) { error(INFO, LOGPRX"Cannot read item.\n"); break; } for (k = 0; k < valsize; k++) { /* Assume Little Endian */ DEBUG_PARSE_PRINT((ofp, "%02X", u.val[valsize - k - 1])); } if (strcmp(grps[i].name, "memory") == 0) { if (strcmp(name, "regionsCount") == 0) { vmss.regionscount = u.val32; } if (strcmp(name, "regionPageNum") == 0) { vmss.regions[idx[0]].startpagenum = u.val32; } if (strcmp(name, "regionPPN") == 0) { vmss.regions[idx[0]].startppn = u.val32; } if (strcmp(name, "regionSize") == 0) { vmss.regions[idx[0]].size 
= u.val32; } if (strcmp(name, "align_mask") == 0) { vmss.alignmask = u.val32; } } DEBUG_PARSE_PRINT((ofp, "\n")); } } } if (vmss.memsize == 0) { char *vmem_filename, *p; fprintf(ofp, LOGPRX"Memory dump is not part of this vmss file.\n"); fclose(fp); fp = NULL; fprintf(ofp, LOGPRX"Try to locate the companion vmem file ...\n"); /* check the companion vmem file */ vmem_filename = strdup(filename); p = vmem_filename + strlen(vmem_filename) - 4; if (strcmp(p, "vmss") != 0 && strcmp(p, "vmsn") != 0) { free(vmem_filename); result = FALSE; goto exit; } strcpy(p, "vmem"); if ((fp = fopen(vmem_filename, "r")) == NULL) { error(INFO, LOGPRX"%s: %s\n", vmem_filename, strerror(errno)); free(vmem_filename); result = FALSE; goto exit; } fseek(fp, 0L, SEEK_END); vmss.memsize = ftell(fp); fseek(fp, 0L, SEEK_SET); fprintf(ofp, LOGPRX"vmem file: %s\n\n", vmem_filename); free(vmem_filename); } vmss.dfp = fp; exit: if (grps) free(grps); if (!result && fp) fclose(fp); return result; } uint vmware_vmss_page_size(void) { return VMW_PAGE_SIZE; } int read_vmware_vmss(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { uint64_t pos = paddr; if (vmss.regionscount > 0) { /* Memory is divided into regions and there are holes between them. */ uint32_t ppn = (uint32_t) (pos >> VMW_PAGE_SHIFT); int i; for (i = 0; i < vmss.regionscount; i++) { if (ppn < vmss.regions[i].startppn) break; /* skip holes. */ pos -= ((vmss.regions[i].startppn - vmss.regions[i].startpagenum) << VMW_PAGE_SHIFT); } } if (pos + cnt > vmss.memsize) { error(INFO, LOGPRX"Read beyond the end of file! 
paddr=%#lx cnt=%d\n", paddr, cnt); } pos += vmss.memoffset; if (fseek(vmss.dfp, pos, SEEK_SET) != 0) return SEEK_ERROR; if (fread(bufptr, 1, cnt, vmss.dfp) != cnt) return READ_ERROR; return cnt; } int write_vmware_vmss(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return SEEK_ERROR; } crash-7.1.4/unwind.c0000664000000000000000000024413712634305150013005 0ustar rootroot/* * Copyright (C) 1999-2002 Hewlett-Packard Co * David Mosberger-Tang */ /* * unwind.c * * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010, 2012 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010, 2012 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * arch/ia64/kernel/unwind.c (kernel-2.4.18-6.23) */ #ifdef IA64 /* * WARNING: unw_frame_info, pt_regs and switch_stack have been * copied to unwind.h, under the UNWIND_V[123] sections; this is * done to rectify the need for this user-land code to use the same * data structures that the target kernel is using. * * Basically it's a juggling match to keep the unw_frame_info, * switch_stack and pt_regs structures in a "known" state -- as defined by * the UNWIND_V[123] definitions used in the unwind.h header file -- and * then passed to the 3 compile lines of unwind.c to create the three * unwind_v[123].o object files. */ /* * 2004-09-14 J. Nomura Added OS_INIT handling */ /* #include can't include this -- it's changing over time! 
*/ #include "defs.h" #include "xen_hyper_defs.h" typedef unsigned char u8; typedef unsigned long long u64; #undef PAGE_SIZE #define PAGE_SIZE PAGESIZE() #define GATE_ADDR (0xa000000000000000 + PAGE_SIZE) #define CLEAR_SCRIPT_CACHE (TRUE) #define _ASM_IA64_FPU_H #include "unwind.h" #include "unwind_i.h" #include "rse.h" static struct unw_reg_state *alloc_reg_state(void); static void free_reg_state(struct unw_reg_state *); static void rse_function_params(struct bt_info *bt, struct unw_frame_info *, char *); static int load_unw_table(int); static void verify_unw_member(char *, long); static void verify_common_struct(char *, long); static void dump_unwind_table(struct unw_table *); static int unw_init_from_blocked_task(struct unw_frame_info *, struct bt_info *); static void unw_init_from_interruption(struct unw_frame_info *, struct bt_info *, ulong, ulong); static int unw_switch_from_osinit_v1(struct unw_frame_info *, struct bt_info *); static int unw_switch_from_osinit_v2(struct unw_frame_info *, struct bt_info *); static int unw_switch_from_osinit_v3(struct unw_frame_info *, struct bt_info *, char *); static unsigned long get_init_stack_ulong(unsigned long addr); static void unw_init_frame_info(struct unw_frame_info *, struct bt_info *, ulong); static int find_save_locs(struct unw_frame_info *); static int unw_unwind(struct unw_frame_info *); static void run_script(struct unw_script *, struct unw_frame_info *); static struct unw_script *script_lookup(struct unw_frame_info *); static struct unw_script *script_new(unsigned long); static void script_finalize(struct unw_script *, struct unw_state_record *); static void script_emit(struct unw_script *, struct unw_insn); static void emit_nat_info(struct unw_state_record *, int, struct unw_script *); static struct unw_script *build_script(struct unw_frame_info *); static struct unw_table_entry *lookup(struct unw_table *, unsigned long); static void compile_reg(struct unw_state_record *, int, struct unw_script *); static 
void compile_reg_v2(struct unw_state_record *, int, struct unw_script *); #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */ #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE) #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1) #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE) #define UNW_DEBUG 0 #define UNW_STATS 0 #define p5 5 #define pNonSys p5 /* complement of pSys */ # define STAT(x...) #define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0) #undef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) /* * Local snapshot of kernel's "unw" table, minus the spinlock_t and anything * after the kernel_table. This allows the unmodified porting of the kernel * code pieces that reference "unw.xxx" directly. * * The 2.6 kernel introduced a new pt_regs_offsets[32] array positioned in * between the preg_index array and the kernel_table members. */ #ifdef REDHAT static struct unw { #else static struct { spinlock_t lock; /* spinlock for unwind data */ #endif /* !REDHAT */ /* list of unwind tables (one per load-module) */ struct unw_table *tables; /* table of registers that prologues can save (and order in which they're saved): */ unsigned char save_order[8]; /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */ unsigned short sw_off[sizeof(struct unw_frame_info) / 8]; unsigned short lru_head; /* index of lead-recently used script */ unsigned short lru_tail; /* index of most-recently used script */ /* index into unw_frame_info for preserved register i */ unsigned short preg_index[UNW_NUM_REGS]; /* unwind table for the kernel: */ struct unw_table kernel_table; #ifndef REDHAT /* unwind table describing the gate page (kernel code that is mapped into user space): */ size_t gate_table_size; unsigned long *gate_table; /* hash table that maps instruction pointer to script index: */ unsigned short hash[UNW_HASH_SIZE]; /* script cache: */ struct unw_script cache[UNW_CACHE_SIZE]; # if UNW_DEBUG 
const char *preg_name[UNW_NUM_REGS]; # endif # if UNW_STATS struct { struct { int lookups; int hinted_hits; int normal_hits; int collision_chain_traversals; } cache; struct { unsigned long build_time; unsigned long run_time; unsigned long parse_time; int builds; int news; int collisions; int runs; } script; struct { unsigned long init_time; unsigned long unwind_time; int inits; int unwinds; } api; } stat; # endif #endif /* !REDHAT */ } unw = { 0 }; static short pt_regs_offsets[32] = { 0 }; static struct unw_reg_state * alloc_reg_state(void) { return((struct unw_reg_state *) GETBUF(sizeof(struct unw_reg_state))); } static void free_reg_state(struct unw_reg_state *rs) { FREEBUF(rs); } static struct unw_labeled_state * alloc_labeled_state(void) { return((struct unw_labeled_state *) GETBUF(sizeof(struct unw_labeled_state))); } static void free_labeled_state(struct unw_labeled_state *ls) { FREEBUF(ls); } typedef unsigned long unw_word; /* Unwind accessors. */ static inline unsigned long pt_regs_off_v2 (unsigned long reg) { short off = -1; if (reg < 32) off = pt_regs_offsets[reg]; if (off < 0) { if (reg > 0) error(INFO, "unwind: bad scratch reg r%lu\n", reg); off = 0; } return (unsigned long) off; } /* * Returns offset of rREG in struct pt_regs. 
*/ static inline unsigned long pt_regs_off (unsigned long reg) { unsigned long off =0; if (machdep->flags & UNW_PTREGS) return pt_regs_off_v2(reg); if (reg >= 1 && reg <= 3) off = struct_offset(struct pt_regs, r1) + 8*(reg - 1); else if (reg <= 11) off = struct_offset(struct pt_regs, r8) + 8*(reg - 8); else if (reg <= 15) off = struct_offset(struct pt_regs, r12) + 8*(reg - 12); else if (reg <= 31) off = struct_offset(struct pt_regs, r16) + 8*(reg - 16); else if (reg > 0) error(INFO, "unwind: bad scratch reg r%lu\n", reg); return off; } #ifdef UNWIND_V1 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { struct pt_regs *pt_unused = NULL; error(INFO, "get_scratch_regs: should not be here!\n"); return pt_unused; } #endif #ifdef UNWIND_V2 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. */ error(INFO, "get_scratch_regs: bad unwind info: resetting info->pt\n"); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long)((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } return (struct pt_regs *) info->pt; } #endif #ifdef UNWIND_V3 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. 
*/ error(INFO, "get_scratch_regs: bad unwind info: resetting info->pt\n"); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long)((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } return (struct pt_regs *) info->pt; } #endif int #ifdef UNWIND_V1 unw_access_gr_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif #ifdef UNWIND_V2 unw_access_gr_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif #ifdef UNWIND_V3 unw_access_gr_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif { unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat; struct unw_ireg *ireg; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) regnum - 1 >= 127) { error(INFO, "unwind: trying to access non-existent r%u\n", regnum); return -1; } if (regnum < 32) { if (regnum >= 4 && regnum <= 7) { /* access a preserved register */ ireg = &info->r4 + (regnum - 4); addr = ireg->loc; if (addr) { nat_addr = addr + ireg->nat.off; switch (ireg->nat.type) { case UNW_NAT_VAL: /* simulate getf.sig/setf.sig */ if (write) { if (*nat) { /* write NaTVal and be done with it */ addr[0] = 0; addr[1] = 0x1fffe; return 0; } addr[1] = 0x1003e; } else { if (addr[0] == 0 && addr[1] == 0x1ffe) { /* return NaT and be done with it */ *val = 0; *nat = 1; return 0; } } /* fall through */ case UNW_NAT_NONE: dummy_nat = 0; nat_addr = &dummy_nat; break; case UNW_NAT_MEMSTK: nat_mask = (1UL << ((long) addr & 0x1f8)/8); break; case UNW_NAT_REGSTK: nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { error(INFO, "unwind: %p outside of regstk " "[0x%lx-0x%lx)\n", (void *) addr, info->regstk.limit, info->regstk.top); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); break; } } else { 
addr = &info->sw->r4 + (regnum - 4); nat_addr = &info->sw->ar_unat; nat_mask = (1UL << ((long) addr & 0x1f8)/8); } } else { /* access a scratch register */ if (machdep->flags & UNW_PTREGS) { pt = get_scratch_regs(info); addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum)); } else { if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; addr = (unsigned long *) ((long) pt + pt_regs_off(regnum)); } if (info->pri_unat_loc) nat_addr = info->pri_unat_loc; else nat_addr = &info->sw->ar_unat; nat_mask = (1UL << ((long) addr & 0x1f8)/8); } } else { /* access a stacked register */ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32); nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { error(INFO, "unwind: ignoring attempt to access register outside of rbs\n"); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); } if (write) { *addr = *val; if (*nat) *nat_addr |= nat_mask; else *nat_addr &= ~nat_mask; } else { if ((IA64_GET_STACK_ULONG(nat_addr) & nat_mask) == 0) { *val = IA64_GET_STACK_ULONG(addr); *nat = 0; } else { *val = 0; /* if register is a NaT, *addr may contain kernel data! 
*/ *nat = 1; } } return 0; } int #ifdef UNWIND_V1 unw_access_br_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_br_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_br_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif { unsigned long *addr; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; switch (regnum) { /* scratch: */ case 0: addr = &pt->b0; break; case 6: addr = &pt->b6; break; case 7: addr = &pt->b7; break; /* preserved: */ case 1: case 2: case 3: case 4: case 5: addr = *(&info->b1_loc + (regnum - 1)); if (!addr) addr = &info->sw->b1 + (regnum - 1); break; default: error(INFO, "unwind: trying to access non-existent b%u\n", regnum); return -1; } if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } #ifdef UNWIND_V1 int unw_access_fr_v1 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 9) addr = &pt->f6 + (regnum - 6); else addr = &info->sw->f10 + (regnum - 10); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + 
OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif #ifdef UNWIND_V2 int unw_access_fr_v2 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif #ifdef UNWIND_V3 int unw_access_fr_v3 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = 
get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif int #ifdef UNWIND_V1 unw_access_ar_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_ar_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_ar_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif { unsigned long *addr; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; switch (regnum) { case UNW_AR_BSP: addr = info->bsp_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_BSPSTORE: addr = info->bspstore_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_PFS: addr = info->pfs_loc; if (!addr) addr = &info->sw->ar_pfs; break; case UNW_AR_RNAT: addr = info->rnat_loc; if (!addr) addr = &info->sw->ar_rnat; break; case UNW_AR_UNAT: addr = info->unat_loc; if (!addr) addr = &info->sw->ar_unat; break; case UNW_AR_LC: addr = info->lc_loc; if (!addr) addr = &info->sw->ar_lc; break; case UNW_AR_EC: if (!info->cfm_loc) return -1; if (write) *info->cfm_loc = (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52); else *val = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 52) & 0x3f; return 0; case 
UNW_AR_FPSR: addr = info->fpsr_loc; if (!addr) addr = &info->sw->ar_fpsr; break; case UNW_AR_RSC: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_rsc; break; case UNW_AR_CCV: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_ccv; break; #if defined(UNWIND_V3) case UNW_AR_CSD: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_csd; break; case UNW_AR_SSD: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_ssd; break; #endif default: error(INFO, "unwind: trying to access non-existent ar%u\n", regnum); return -1; } if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } int #ifdef UNWIND_V1 unw_access_pr_v1 (struct unw_frame_info *info, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_pr_v2 (struct unw_frame_info *info, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_pr_v3 (struct unw_frame_info *info, unsigned long *val, int write) #endif { unsigned long *addr; struct bt_info *bt = (struct bt_info *)info->task; addr = info->pr_loc; if (!addr) addr = &info->sw->pr; if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } /* Routines to manipulate the state stack. */ static inline void push (struct unw_state_record *sr) { struct unw_reg_state *rs; rs = alloc_reg_state(); if (!rs) { error(INFO, "unwind: cannot stack reg state!\n"); return; } memcpy(rs, &sr->curr, sizeof(*rs)); sr->curr.next = rs; } static void pop (struct unw_state_record *sr) { struct unw_reg_state *rs = sr->curr.next; if (!rs) { error(INFO, "unwind: stack underflow!\n"); return; } memcpy(&sr->curr, rs, sizeof(*rs)); free_reg_state(rs); } /* Make a copy of the state stack. Non-recursive to avoid stack overflows. 
*/ static struct unw_reg_state * dup_state_stack (struct unw_reg_state *rs) { struct unw_reg_state *copy, *prev = NULL, *first = NULL; while (rs) { copy = alloc_reg_state(); if (!copy) { error(INFO, "unwind.dup_state_stack: out of memory\n"); return NULL; } memcpy(copy, rs, sizeof(*copy)); if (first) prev->next = copy; else first = copy; rs = rs->next; prev = copy; } return first; } /* Free all stacked register states (but not RS itself). */ static void free_state_stack (struct unw_reg_state *rs) { struct unw_reg_state *p, *next; for (p = rs->next; p != NULL; p = next) { next = p->next; free_reg_state(p); } rs->next = NULL; } /* Routines to manipulate the state stack. */ static enum unw_register_index __attribute__((const)) decode_abreg (unsigned char abreg, int memory) { switch (abreg) { case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04); case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22); case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30); case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41); case 0x60: return UNW_REG_PR; case 0x61: return UNW_REG_PSP; case 0x62: return memory ? 
UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR; case 0x63: return UNW_REG_RP; case 0x64: return UNW_REG_BSP; case 0x65: return UNW_REG_BSPSTORE; case 0x66: return UNW_REG_RNAT; case 0x67: return UNW_REG_UNAT; case 0x68: return UNW_REG_FPSR; case 0x69: return UNW_REG_PFS; case 0x6a: return UNW_REG_LC; default: break; } error(INFO, "unwind: bad abreg=0x%x\n", abreg); return UNW_REG_LC; } static void set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val) { reg->val = val; reg->where = where; if (reg->when == UNW_WHEN_NEVER) reg->when = when; } static void alloc_spill_area (unsigned long *offp, unsigned long regsize, struct unw_reg_info *lo, struct unw_reg_info *hi) { struct unw_reg_info *reg; for (reg = hi; reg >= lo; --reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->where = UNW_WHERE_PSPREL; *offp -= regsize; reg->val = *offp; #ifndef KERNEL_FIX reg->val = 0x10 - *offp; *offp += regsize; #endif } } } static inline void spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t) { struct unw_reg_info *reg; for (reg = *regp; reg <= lim; ++reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->when = t; *regp = reg + 1; return; } } error(INFO, "unwind: excess spill!\n"); } static inline void finish_prologue (struct unw_state_record *sr) { struct unw_reg_info *reg; unsigned long off; int i; /* * First, resolve implicit register save locations (see Section "11.4.2.3 Rules * for Using Unwind Descriptors", rule 3): */ for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) { reg = sr->curr.reg + unw.save_order[i]; if (reg->where == UNW_WHERE_GR_SAVE) { reg->where = UNW_WHERE_GR; reg->val = sr->gr_save_loc++; } } /* * Next, compute when the fp, general, and branch registers get * saved. This must come before alloc_spill_area() because * we need to know which registers are spilled to their home * locations. 
*/ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; unsigned long t; static const unsigned char limit[3] = { UNW_REG_F31, UNW_REG_R7, UNW_REG_B5 }; struct unw_reg_info *(regs[3]); regs[0] = sr->curr.reg + UNW_REG_F2; regs[1] = sr->curr.reg + UNW_REG_R4; regs[2] = sr->curr.reg + UNW_REG_B1; for (t = 0; t < sr->region_len; ++t) { if ((t & 3) == 0) mask = *cp++; kind = (mask >> 2*(3-(t & 3))) & 3; if (kind > 0) spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1], sr->region_start + t); } } /* * Next, lay out the memory stack spill area: */ if (sr->any_spills) { off = sr->spill_offset; alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7); } } /* * Region header descriptors. */ static void desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, struct unw_state_record *sr) { int i; if (!(sr->in_body || sr->first_region)) finish_prologue(sr); sr->first_region = 0; /* check if we're done: */ if (sr->when_target < sr->region_start + sr->region_len) { sr->done = 1; return; } for (i = 0; i < sr->epilogue_count; ++i) pop(sr); sr->epilogue_count = 0; sr->epilogue_start = UNW_WHEN_NEVER; if (!body) push(sr); sr->region_start += sr->region_len; sr->region_len = rlen; sr->in_body = body; if (!body) { for (i = 0; i < 4; ++i) { if (mask & 0x8) set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR, sr->region_start + sr->region_len - 1, grsave++); mask <<= 1; } sr->gr_save_loc = grsave; sr->any_spills = 0; sr->imask = 0; sr->spill_offset = 0x10; /* default to psp+16 */ } } /* * Prologue descriptors. 
*/ static inline void desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr) { console("desc_abi: abi: 0x%x context: %c\n", abi, context); if (((abi == 0) || (abi == 3)) && context == 'i') sr->flags |= UNW_FLAG_INTERRUPT_FRAME; else error(INFO, "unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context); } static inline void desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); brmask >>= 1; } } static inline void desc_br_mem (unsigned char brmask, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) { set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } brmask >>= 1; } } static inline void desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { int base = (i < 4) ? 
UNW_REG_F2 : UNW_REG_F16 - 4; set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } #ifndef KERNEL_FIX for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } #endif } static inline void desc_fr_mem (unsigned char frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((frmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); grmask >>= 1; } } static inline void desc_gr_mem (unsigned char grmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } } static inline void desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr) { set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE, sr->region_start + MIN((int)t, sr->region_len - 1), 16*size); } static inline void desc_mem_stack_v (unw_word t, struct unw_state_record *sr) { sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1); } static inline void desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst); } static inline void desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 0x10 - 
4*pspoff); } static inline void desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1, 4*spoff); } static inline void desc_rp_br (unsigned char dst, struct unw_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr) { struct unw_reg_info *reg = sr->curr.reg + regnum; if (reg->where == UNW_WHERE_NONE) reg->where = UNW_WHERE_GR_SAVE; reg->when = sr->region_start + MIN((int)t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct unw_state_record *sr) { sr->spill_offset = 0x10 - 4*pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr) { sr->imask = imaskp; return imaskp + (2*sr->region_len + 7)/8; } /* * Body descriptors. */ static inline void desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr) { sr->epilogue_start = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack(&sr->curr); memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); sr->curr.next = dup_state_stack(ls->saved_state.next); return; } } error(INFO, "unwind: failed to find state labeled 0x%lx\n", label); } static inline void desc_label_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; ls = alloc_labeled_state(); if (!ls) { error(INFO, "unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); ls->saved_state.next = dup_state_stack(sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* * General 
descriptors. */ static inline int desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) { if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1)) return 0; if (qp > 0) { if ((sr->pr_val & (1UL << qp)) == 0) return 0; sr->pr_mask |= (1UL << qp); } return 1; } static inline void desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = UNW_WHERE_NONE; r->when = UNW_WHEN_NEVER; r->val = 0; } static inline void desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct unw_state_record *sr) { enum unw_where where = UNW_WHERE_GR; struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; if (x) where = UNW_WHERE_BR; else if (ytreg & 0x80) where = UNW_WHERE_FR; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = where; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = (ytreg & 0x7f); } static inline void desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_PSPREL; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = 0x10 - 4*pspoff; } static inline void desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_SPREL; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = 4*spoff; } #define UNW_DEC_BAD_CODE(code) error(INFO, "unwind: unknown code 0x%02x\n", code); /* * region headers: */ #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) #define 
UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) /* * prologue descriptors: */ #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg) #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg) #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg) #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) /* * body descriptors: */ #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) /* * general unwind descriptors: */ #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg) #define 
UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) #include "unwind_decoder.c" /* * Run a sanity check on the common structure usage, and do an initial * read of the unw table. If anything fails, the UNW_OUT_OF_SYNC flag * will be set and backtraces not allowed. */ void #ifdef UNWIND_V1 unwind_init_v1(void) #endif #ifdef UNWIND_V2 unwind_init_v2(void) #endif #ifdef UNWIND_V3 unwind_init_v3(void) #endif { int len; struct gnu_request request, *req; req = &request; if (LKCD_KERNTYPES()) { if ((len = STRUCT_SIZE("unw")) == 0) { error(WARNING, "cannot determine unw.tables offset; no struct unw\n"); machdep->flags |= UNW_OUT_OF_SYNC; return; } machdep->machspec->unw_tables_offset = MEMBER_OFFSET("unw", "tables"); if (MEMBER_EXISTS("unw", "r0")) machdep->flags |= UNW_R0; /* * no verification of save_order, sw_off, preg_index as * we're purely depending on the structure definition. */ if (MEMBER_EXISTS("unw", "pt_regs_offsets")) { machdep->machspec->unw_pt_regs_offsets = MEMBER_OFFSET("unw", "pt_regs_offsets") - machdep->machspec->unw_tables_offset; machdep->machspec->unw_kernel_table_offset = MEMBER_OFFSET("unw", "kernel_table") - machdep->machspec->unw_tables_offset; machdep->flags |= UNW_PTREGS; } if (!load_unw_table(CLEAR_SCRIPT_CACHE)) { error(WARNING, "unwind_init: cannot read kernel unw table\n"); machdep->flags |= UNW_OUT_OF_SYNC; } machdep->machspec->unw = (void *)&unw; /* fall to common structure size verifications */ goto verify; } if (get_symbol_type("unw", "tables", req) == TYPE_CODE_UNDEF) { /* * KLUDGE ALERT: * If unw.tables cannot be ascertained by gdb, try unw.save_order, * given that it is the field just after unw.tables. 
*/ if (get_symbol_type("unw", "save_order", req) == TYPE_CODE_UNDEF) { error(WARNING, "cannot determine unw.tables offset\n"); machdep->flags |= UNW_OUT_OF_SYNC; } else req->member_offset -= BITS_PER_BYTE * sizeof(void *); if (CRASHDEBUG(1)) error(WARNING, "using unw.save_order to determine unw.tables\n"); } if (!(machdep->flags & UNW_OUT_OF_SYNC)) { machdep->machspec->unw_tables_offset = req->member_offset/BITS_PER_BYTE; if (get_symbol_type("unw", "r0", req) != TYPE_CODE_UNDEF) machdep->flags |= UNW_R0; verify_unw_member("save_order", struct_offset(struct unw, save_order)); verify_unw_member("sw_off", struct_offset(struct unw, sw_off)); verify_unw_member("preg_index", struct_offset(struct unw, preg_index)); if (get_symbol_type("unw", "pt_regs_offsets", req) == TYPE_CODE_ARRAY) { machdep->machspec->unw_pt_regs_offsets = req->member_offset/BITS_PER_BYTE - machdep->machspec->unw_tables_offset; get_symbol_type("unw", "kernel_table", req); machdep->machspec->unw_kernel_table_offset = req->member_offset/BITS_PER_BYTE - machdep->machspec->unw_tables_offset; machdep->flags |= UNW_PTREGS; } else verify_unw_member("kernel_table", struct_offset(struct unw, kernel_table)); if (!load_unw_table(CLEAR_SCRIPT_CACHE)) { error(WARNING, "unwind_init: cannot read kernel unw table\n"); machdep->flags |= UNW_OUT_OF_SYNC; } machdep->machspec->unw = (void *)&unw; } verify: verify_common_struct("unw_frame_info", sizeof(struct unw_frame_info)); verify_common_struct("unw_table", sizeof(struct unw_table)); verify_common_struct("unw_table_entry", sizeof(struct unw_table_entry)); verify_common_struct("unw_state_record", sizeof(struct unw_state_record)); verify_common_struct("unw_labeled_state", sizeof(struct unw_labeled_state)); verify_common_struct("unw_reg_info", sizeof(struct unw_reg_info)); verify_common_struct("unw_insn", sizeof(struct unw_insn)); } /* * Check whether the unw fields used in this port exist at the same * offset as the local version of the structure. 
*/ static void verify_unw_member(char *member, long loffs) { struct gnu_request request, *req; long koffs; req = &request; if (get_symbol_type("unw", member, req) == TYPE_CODE_UNDEF) { error(WARNING, "cannot determine unw.%s offset\n", member); machdep->flags |= UNW_OUT_OF_SYNC; } else { koffs = (req->member_offset/BITS_PER_BYTE) - machdep->machspec->unw_tables_offset; if (machdep->flags & UNW_R0) koffs -= sizeof(unsigned long); if (koffs != loffs) { error(WARNING, "unw.%s offset differs: %ld (local: %d)\n", member, koffs, loffs); machdep->flags |= UNW_OUT_OF_SYNC; } else if (CRASHDEBUG(3)) error(INFO, "unw.%s offset OK: %ld (local: %d)\n", member, koffs, loffs); } } /* * Check whether the sizes of common local/kernel structures match. */ static void verify_common_struct(char *structname, long loclen) { long len; len = STRUCT_SIZE(structname); if (len < 0) { error(WARNING, "cannot determine size of %s\n", structname); machdep->flags |= UNW_OUT_OF_SYNC; } else if (len != loclen) { error(WARNING, "%s size differs: %ld (local: %d)\n", structname, len, loclen); machdep->flags |= UNW_OUT_OF_SYNC; } } /* * Do a one-time read of the useful part of the kernel's unw table into the * truncated local version, followed by a one-time read of the kernel's * unw_table_entry array into a permanently allocated location. The * script cache is cleared only if requested. 
*/ static int load_unw_table(int clear_cache) { int i; size_t len; struct machine_specific *ms; struct unw_table_entry *kernel_unw_table_entry_array; if (machdep->flags & UNW_OUT_OF_SYNC) return FALSE; ms = machdep->machspec; if (clear_cache) { if (!ms->script_cache) { len = sizeof(struct unw_script) * UNW_CACHE_SIZE; if ((ms->script_cache = (struct unw_script *)malloc(len)) == NULL) { error(WARNING, "cannot malloc unw_script cache\n"); return FALSE; } } for (i = 0; i < UNW_CACHE_SIZE; i++) BZERO((void *)&ms->script_cache[i], sizeof(struct unw_script)); ms->script_index = 0; } if (machdep->flags & UNW_READ) return TRUE; if (machdep->flags & UNW_R0) { struct unw *unw_temp, *up; unw_temp = (struct unw *)GETBUF(sizeof(struct unw) * 2); up = unw_temp; if (!readmem(symbol_value("unw")+ms->unw_tables_offset, KVADDR, up, sizeof(struct unw) + sizeof(struct unw_table *), "unw", RETURN_ON_ERROR|QUIET)) return FALSE; unw.tables = up->tables; /* * Bump the "up" pointer by 8 to account for the * "r0" member that comes after the "tables" member. 
*/ up = (struct unw *)(((unsigned long)unw_temp) + sizeof(struct unw_table *)); for (i = 0; i < 8; i++) unw.save_order[i] = up->save_order[i]; for (i = 0; i < (sizeof(struct unw_frame_info) / 8); i++) unw.sw_off[i] = up->sw_off[i]; unw.lru_head = up->lru_head; unw.lru_tail = up->lru_tail; for (i = 0; i < UNW_NUM_REGS; i++) unw.preg_index[i] = up->preg_index[i]; BCOPY(&up->kernel_table, &unw.kernel_table, sizeof(struct unw_table)); FREEBUF(unw_temp); } else { if (!readmem(symbol_value("unw")+ms->unw_tables_offset, KVADDR, &unw, sizeof(struct unw), "unw", RETURN_ON_ERROR|QUIET)) return FALSE; } if (machdep->flags & UNW_PTREGS) { if (!readmem(symbol_value("unw")+ms->unw_kernel_table_offset+ machdep->machspec->unw_tables_offset, KVADDR, &unw.kernel_table, sizeof(struct unw_table), "unw.kernel_table", RETURN_ON_ERROR|QUIET)) return FALSE; if (!readmem(symbol_value("unw")+ms->unw_pt_regs_offsets+ machdep->machspec->unw_tables_offset, KVADDR, &pt_regs_offsets, sizeof(pt_regs_offsets), "unw.pt_regs_offsets", RETURN_ON_ERROR|QUIET)) return FALSE; } len = unw.kernel_table.length * sizeof(struct unw_table_entry); if ((kernel_unw_table_entry_array = (struct unw_table_entry *)malloc(len)) == NULL) { error(WARNING, "cannot malloc kernel unw.kernel_table array (len: %d)\n", len); return FALSE; } if (!readmem((ulong)unw.kernel_table.array, KVADDR, kernel_unw_table_entry_array, len, "kernel unw_table_entry array", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read kernel unw.kernel_table array\n"); return FALSE; } /* * Bait and switch for the kernel array only. */ unw.kernel_table.array = kernel_unw_table_entry_array; machdep->flags |= UNW_READ; return TRUE; } /* * The main back trace loop. If we get interrupted in the midst of an * operation, unw_in_progress will left TRUE, and the next time we come * here, the script_cache will be cleared. 
*/ void #ifdef UNWIND_V1 unwind_v1(struct bt_info *bt) #endif #ifdef UNWIND_V2 unwind_v2(struct bt_info *bt) #endif #ifdef UNWIND_V3 unwind_v3(struct bt_info *bt) #endif { struct unw_frame_info unw_frame_info, *info; unsigned long ip, sp, bsp; struct syment *sm; struct pt_regs *pt; int frame; char *name, *name_plus_offset; ulong offset; struct load_module *lm; static int unw_in_progress = FALSE; char buf[BUFSIZE]; if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); if (!load_unw_table(unw_in_progress ? CLEAR_SCRIPT_CACHE : 0)) error(FATAL, "unwind: cannot read kernel unw table\n"); unw_in_progress = TRUE; info = &unw_frame_info; if (!unw_init_from_blocked_task(info, bt)) goto unwind_return; frame = 0; do { restart: unw_get_ip(info, &ip); unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); if (XEN_HYPER_MODE()) { if (!IS_KVADDR(ip)) break; } else { if (ip < GATE_ADDR + PAGE_SIZE) break; } name_plus_offset = NULL; if ((sm = value_search(ip, &offset))) { name = sm->name; if ((bt->flags & BT_SYMBOL_OFFSET) && offset) name_plus_offset = value_to_symstr(ip, buf, bt->radix); } else name = "(unknown)"; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str)) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == ip) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; } } else { fprintf(fp, "%s#%d [BSP:%lx] %s at %lx", frame >= 10 ? "" : " ", frame, bsp, name_plus_offset ? 
name_plus_offset : name, ip); if (module_symbol(ip, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_FULL) rse_function_params(bt, info, name); if (bt->flags & BT_LINE_NUMBERS) ia64_dump_line_number(ip); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) { pt = (struct pt_regs *)info->psp - 1; ia64_exception_frame((ulong)pt, bt); } } if (STREQ(name, "start_kernel") || STREQ(name, "start_secondary") || STREQ(name, "start_kernel_thread")) break; /* * "init_handler_platform" indicates that this task was * interrupted by INIT and its stack was switched. */ if (STREQ(name, "init_handler_platform")) { unw_switch_from_osinit_v1(info, bt); frame++; goto restart; } /* * In some cases, init_handler_platform is inlined into * ia64_init_handler. */ if (STREQ(name, "ia64_init_handler")) { if (symbol_exists("ia64_mca_modify_original_stack")) { /* * 2.6.14 or later kernels no longer keep * minstate info in pt_regs/switch_stack. * unw_switch_from_osinit_v3() will try * to find the interrupted task and restart * backtrace itself. */ if (unw_switch_from_osinit_v3(info, bt, "INIT") == FALSE) break; } else { if (unw_switch_from_osinit_v2(info, bt) == FALSE) break; frame++; goto restart; } } if (STREQ(name, "ia64_mca_handler") && symbol_exists("ia64_mca_modify_original_stack")) if (unw_switch_from_osinit_v3(info, bt, "MCA") == FALSE) break; frame++; } while (unw_unwind(info) >= 0); unwind_return: if (bt->flags & BT_UNWIND_ERROR) load_unw_table(CLEAR_SCRIPT_CACHE); if (bt->debug) CRASHDEBUG_RESTORE(); unw_in_progress = FALSE; } void #ifdef UNWIND_V1 dump_unwind_stats_v1(void) #endif #ifdef UNWIND_V2 dump_unwind_stats_v2(void) #endif #ifdef UNWIND_V3 dump_unwind_stats_v3(void) #endif { int i; struct machine_specific *ms; char buf[BUFSIZE]; if (machdep->flags & UNW_OUT_OF_SYNC) { fprintf(fp, "\n"); return; } ms = machdep->machspec; fprintf(fp, " %2ld%% (%ld of %ld)\n", ms->script_cache_fills ? 
	    (ms->script_cache_hits * 100)/ms->script_cache_fills : 0,
		ms->script_cache_hits, ms->script_cache_fills);

	for (i = 0; i < UNW_CACHE_SIZE; i++) {
		if (ms->script_cache[i].ip)
			fprintf(fp, " [%3d]: %lx %s\n", i,
				ms->script_cache[i].ip,
				value_to_symstr(ms->script_cache[i].ip,
					buf, 0));
	}
}

/*
 * Dump either one unw_table (arg is taken as its kernel address) or,
 * when arg is 3, every unw_table on the kernel's unw.tables list.
 */
int
#ifdef UNWIND_V1
unwind_debug_v1(ulong arg)
#endif
#ifdef UNWIND_V2
unwind_debug_v2(ulong arg)
#endif
#ifdef UNWIND_V3
unwind_debug_v3(ulong arg)
#endif
{
	struct unw_table *table, *target;
	struct unw_table unw_table_buf;

	target = (struct unw_table *)arg;
	table = unw.tables;

	do {
		if (!readmem((ulong)table, KVADDR, &unw_table_buf,
		    sizeof(struct unw_table), "module unw_table",
		    RETURN_ON_ERROR))
			break;

		switch (arg)
		{
		case 3:
			dump_unwind_table(table);
			break;
		default:
			if (table == target)
				dump_unwind_table(table);
			break;
		}

		/* follow the list via the local copy just read */
		table = &unw_table_buf;
		table = table->next;
	} while (table);

	return TRUE;
}

static void
dump_unwind_table(struct unw_table *table)
{
	struct unw_table unw_table_buf, *tbl;

	/*
	 * NOTE(review): "tbl" and the readmem result are not used by the
	 * dump_struct() call below; the read presumably just validates
	 * the address before dumping -- TODO confirm intent.
	 */
	readmem((ulong)table, KVADDR, &unw_table_buf,
		sizeof(struct unw_table), "module unw_table",
		RETURN_ON_ERROR);
	tbl = &unw_table_buf;

	dump_struct("unw_table", (ulong)table, RADIX(16));
}

/*
 * Read one unsigned long from kernel memory; faults on failure.
 */
static unsigned long
get_init_stack_ulong(unsigned long addr)
{
	unsigned long tmp;

	readmem(addr, KVADDR, &tmp, sizeof(unsigned long),
		"get_init_stack_ulong", FAULT_ON_ERROR);

	return tmp;
}

/*
 * Seed an unw_frame_info from the blocked task's switch_stack address,
 * after verifying that it lies within a valid stack for this task.
 */
static int
unw_init_from_blocked_task(struct unw_frame_info *info, struct bt_info *bt)
{
	ulong sw;

	sw = SWITCH_STACK_ADDR(bt->task);

	if (XEN_HYPER_MODE()) {
		if (!INSTACK(sw, bt) && !ia64_in_mca_stack_hyper(sw, bt))
			return FALSE;
	} else {
		if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw))
			return FALSE;
	}

	unw_init_frame_info(info, bt, sw);

	return TRUE;
}

/*
 * unw_init_from_interruption
 *	Initialize frame info from specified pt_regs/switch_stack.
 *
 * Similar to unw_init_frame_info() except that:
 * - do not use readmem to access stack
 *   (because stack may be modified by unw_init_from_saved_regs)
 * - use ar.ifs and ar.iip instead of ar.pfs and b0, respectively
 * - use sof(size-of-frame) of ar.ifs to calculate bsp,
 *   instead of sol(size-of-local) of ar.pfs
 *   (because of cover instruction in kernel minstate save macro)
 */
static void
unw_init_from_interruption(struct unw_frame_info *info, struct bt_info *bt,
	ulong pt, ulong sw)
{
//	unsigned long rbslimit, rbstop, stklimit, stktop, sof, ar_pfs;
	unsigned long rbslimit, rbstop, stklimit, stktop, sof;
	ulong t;

	t = bt->task;

	memset(info, 0, sizeof(*info));

	/* register backing store: bounded below by task + IA64_RBS_OFFSET */
	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
	rbstop = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_ar_bspstore));
	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
		rbstop = rbslimit;

	/* memory stack: top taken from the saved r12 (sp) in pt_regs */
	stklimit = (unsigned long) t + IA64_STK_OFFSET;
	stktop = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, r12));
	if (stktop <= rbstop)
		stktop = rbstop;

	info->regstk.limit = rbslimit;
	info->regstk.top = rbstop;
	info->memstk.limit = stklimit;
	info->memstk.top = stktop;
	/* crash stashes its bt_info pointer in the task field */
	info->task = (struct task_struct *)bt;
	info->sw = (struct switch_stack *)sw;
	info->sp = info->psp = stktop;
	info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr));
	info->cfm_loc = (unsigned long *)
		(pt + offsetof(struct pt_regs, cr_ifs));
	info->unat_loc = (unsigned long *)
		(pt + offsetof(struct pt_regs, ar_unat));
	info->pfs_loc = (unsigned long *)
		(pt + offsetof(struct pt_regs, ar_pfs));

	/* register stack is covered */
	sof = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f;
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *)
		info->regstk.top, -sof);

	/* interrupted ip is saved in iip */
	info->ip = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_iip));

#if defined(UNWIND_V2) || defined(UNWIND_V3)
	info->pt = pt;
#endif

	find_save_locs(info);
}

/*
 * unw_switch_from_osinit
 *	switch back to interrupted context
 *
 * assumption: init_handler_platform()
has 3 arguments, * 2nd arg is pt_regs and 3rd arg is switch_stack. */ static int unw_switch_from_osinit_v1(struct unw_frame_info *info, struct bt_info *bt) { unsigned long pt, sw; char is_nat; /* pt_regs is the 2nd argument of init_handler_platform */ if (unw_get_gr(info, 33, &pt, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } /* switch_stack is the 3rd argument of init_handler_platform */ if (unw_get_gr(info, 34, &sw, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } unw_init_from_interruption(info, bt, pt, sw); ia64_exception_frame(pt, bt); return TRUE; } static int unw_switch_from_osinit_v2(struct unw_frame_info *info, struct bt_info *bt) { unsigned long pt, sw; char is_nat; /* pt_regs is the 1st argument of ia64_init_handler */ if (unw_get_gr(info, 32, &pt, &is_nat)) { fprintf(fp, "gr 32 get error\n"); return FALSE; } /* switch_stack is the 2nd argument of ia64_init_handler */ if (unw_get_gr(info, 33, &sw, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } /* Fix me! */ sw = info->psp + 16; pt = sw + STRUCT_SIZE("switch_stack"); unw_init_from_interruption(info, bt, pt, sw); ia64_exception_frame(pt, bt); return TRUE; } /* CPL (current privilege level) is 2-bit field */ #define IA64_PSR_CPL0_BIT 32 #define IA64_PSR_CPL_MASK (3UL << IA64_PSR_CPL0_BIT) static int user_mode(struct bt_info *bt, unsigned long pt) { unsigned long cr_ipsr; cr_ipsr = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_ipsr)); if (cr_ipsr & IA64_PSR_CPL_MASK) return 1; return 0; } /* * Cope with INIT/MCA stack for the kernel 2.6.14 or later * * Returns FALSE if no more unwinding is needed. 
 */
#define ALIGN16(x) ((x)&~15)

/*
 * Locate the task that was interrupted by INIT/MCA -- either from the
 * handler task's comm string or from the SAL/OS state -- and backtrace
 * it, or fall back to dumping the exception frame found at the top of
 * the INIT/MCA stack.
 */
static int
unw_switch_from_osinit_v3(struct unw_frame_info *info, struct bt_info *bt,
	char *type)
{
	unsigned long pt, sw, sos, pid;
	char *p, *q;
	struct task_context *tc = NULL;
	struct bt_info clone_bt;
	unsigned long kr_current, offset_kr;

	/*
	 * The structure of INIT/MCA stack
	 *
	 * +---------------------------+ <-------- IA64_STK_OFFSET
	 * |          pt_regs          |
	 * +---------------------------+
	 * |        switch_stack       |
	 * +---------------------------+
	 * |        SAL/OS state       |
	 * +---------------------------+
	 * |   16 byte scratch area    |
	 * +---------------------------+ <-------- SP at start of C handler
	 * |           .....           |
	 * +---------------------------+
	 * | RBS for MCA/INIT handler  |
	 * +---------------------------+
	 * | struct task for MCA/INIT  |
	 * +---------------------------+ <-------- bt->task
	 */
	pt = ALIGN16(bt->task + IA64_STK_OFFSET - STRUCT_SIZE("pt_regs"));
	sw = ALIGN16(pt - STRUCT_SIZE("switch_stack"));
	sos = ALIGN16(sw - STRUCT_SIZE("ia64_sal_os_state"));

	/*
	 * 1. Try to find interrupted task from comm
	 *
	 * comm format of INIT/MCA task (angle-bracket text restored;
	 * see ia64_mca_modify_comm() in arch/ia64/kernel/mca.c):
	 *   - "<type> <original comm> <original pid>"
	 *   - "<type>" alone when the original stack was not modified.
	 */
	if (!bt->tc || !bt->tc->comm)
		goto find_exframe;

	/*
	 * If comm is "INIT" or "MCA", it means original stack is not
	 * modified.
	 */
	if (STREQ(bt->tc->comm, type)) {
		/* Get pid using ia64_sal_os_state */
		pid = 0;
		offset_kr = MEMBER_OFFSET("ia64_sal_os_state",
			"prev_IA64_KR_CURRENT");
		readmem(sos + offset_kr, KVADDR, &kr_current, sizeof(ulong),
			"ia64_sal_os_state prev_IA64_KR_CURRENT",
			FAULT_ON_ERROR);
		readmem(kr_current + OFFSET(task_struct_pid), KVADDR, &pid,
			sizeof(pid_t), "task_struct pid", FAULT_ON_ERROR);

		if (pid)
			tc = pid_to_context(pid);
		else {
			/* PID 0: find the swapper task on the same cpu */
			tc = pid_to_context(0);
			while (tc) {
				if (tc != bt->tc &&
				    tc->processor == bt->tc->processor)
					break;
				tc = tc->tc_next;
			}
		}

		if (tc) {
			/* Clone bt_info and do backtrace */
			clone_bt_info(bt, &clone_bt, tc);
			if (!BT_REFERENCE_CHECK(&clone_bt)) {
				fprintf(fp, "(%s) INTERRUPTED TASK\n", type);
				print_task_header(fp, tc, 0);
			}
			if (!user_mode(bt, pt))
				goto find_exframe;
			else if (!BT_REFERENCE_CHECK(bt)) {
				fprintf(fp,
				    " #0 [interrupted in user space]\n");
				/* at least show the incomplete exception
				   frame */
				bt->flags |= BT_INCOMPLETE_USER_EFRAME;
				ia64_exception_frame(pt, bt);
			}
		}
		return FALSE;
	}

	if ((p = strstr(bt->tc->comm, type))) {
		p += strlen(type);
		if (*p != ' ')
			goto find_exframe;
		if ((q = strchr(++p, ' '))) {
			/*
			 * "<type> <comm> <pid>" form (text restored):
			 *
			 * We came from one of the PID 0 swapper tasks,
			 * so just find the one with the same cpu as
			 * the passed-in INIT/MCA task.
			 */
			tc = pid_to_context(0);
			while (tc) {
				if (tc != bt->tc &&
				    tc->processor == bt->tc->processor)
					break;
				tc = tc->tc_next;
			}
		} else if (sscanf(p, "%lu", &pid) > 0)
			/* presumably "<type> <pid>" -- TODO confirm
			   against ia64_mca_modify_comm() */
			tc = pid_to_context(pid);
	}

	if (tc) {
		/* Clone bt_info and do backtrace */
		clone_bt_info(bt, &clone_bt, tc);
		if (!BT_REFERENCE_CHECK(&clone_bt)) {
			fprintf(fp, "(%s) INTERRUPTED TASK\n", type);
			print_task_header(fp, tc, 0);
		}
		if (!user_mode(bt, pt))
			back_trace(&clone_bt);
		else if (!BT_REFERENCE_CHECK(bt)) {
			fprintf(fp, " #0 [interrupted in user space]\n");
			/* at least show the incomplete exception frame */
			bt->flags |= BT_INCOMPLETE_USER_EFRAME;
			ia64_exception_frame(pt, bt);
		}
		return FALSE;
	}

	/* task matching with INIT/MCA task's comm is not found */

find_exframe:
	/*
	 * 2. If step 1 doesn't work, try best to find exception frame
	 */
	unw_init_from_interruption(info, bt, pt, sw);
	if (!BT_REFERENCE_CHECK(bt))
		ia64_exception_frame(pt, bt);

	return TRUE;
}

static void
unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw)
{
	unsigned long rbslimit, rbstop, stklimit, stktop, sol, ar_pfs;
	ulong t;

	t = bt->task;

	/*
	 * Subtle stuff here: we _could_ unwind through the
	 * switch_stack frame but we don't want to do that because it
	 * would be slow as each preserved register would have to be
	 * processed.  Instead, what we do here is zero out the frame
	 * info and start the unwind process at the function that
	 * created the switch_stack frame.  When a preserved value in
	 * switch_stack needs to be accessed, run_script() will
	 * initialize the appropriate pointer on demand.
*/ memset(info, 0, sizeof(*info)); rbslimit = (unsigned long) t + IA64_RBS_OFFSET; readmem(sw + OFFSET(switch_stack_ar_bspstore), KVADDR, &rbstop, sizeof(ulong), "switch_stack ar_bspstore", FAULT_ON_ERROR); if (rbstop - (unsigned long) t >= IA64_STK_OFFSET) rbstop = rbslimit; stklimit = (unsigned long) t + IA64_STK_OFFSET; stktop = (unsigned long) sw - 16; if (stktop <= rbstop) stktop = rbstop; info->regstk.limit = rbslimit; info->regstk.top = rbstop; info->memstk.limit = stklimit; info->memstk.top = stktop; info->task = (struct task_struct *)bt; info->sw = (struct switch_stack *)sw; info->sp = info->psp = (unsigned long) (sw + SIZE(switch_stack)) - 16; info->cfm_loc = (ulong *)(sw + OFFSET(switch_stack_ar_pfs)); ar_pfs = IA64_GET_STACK_ULONG(info->cfm_loc); sol = (ar_pfs >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); info->ip = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_b0)); info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr)); find_save_locs(info); } /* * Display the arguments to a function, presuming that they are found at * the beginning of the sol section. 
*/ #define MAX_REGISTER_PARAMS (8) static void rse_function_params(struct bt_info *bt, struct unw_frame_info *info, char *name) { int i; int numargs; char is_nat[MAX_REGISTER_PARAMS]; int retval[MAX_REGISTER_PARAMS]; char buf1[BUFSIZE], buf2[BUFSIZE], buf3[BUFSIZE], *p1; ulong arglist[MAX_REGISTER_PARAMS]; ulong ip; if (GDB_PATCHED()) return; unw_get_ip(info, &ip); numargs = MIN(get_function_numargs(ip), MAX_REGISTER_PARAMS); if (CRASHDEBUG(1)) fprintf(fp, "rse_function_params: %s: %d args\n", name, numargs); switch (numargs) { case 0: fprintf(fp, " (void)\n"); return; case -1: return; default: break; } for (i = 0; i < numargs; i++) { arglist[i] = is_nat[i] = retval[i] = 0; retval[i] = unw_get_gr(info, 32+i, &arglist[i], &is_nat[i]); } sprintf(buf1, " ("); for (i = 0; i < numargs; i++) { p1 = &buf1[strlen(buf1)]; if (retval[i] != 0) sprintf(buf2, "unknown"); if (is_nat[i]) sprintf(buf2, "[NAT]"); else { if (bt->flags & BT_FULL_SYM_SLAB) sprintf(buf2, "%s", format_stack_entry(bt, buf3, arglist[i], kt->end)); else sprintf(buf2, "%lx", arglist[i]); } sprintf(p1, "%s%s", i ? 
", " : "", buf2); if (strlen(buf1) >= 80) sprintf(p1, ",\n %s", buf2); } strcat(buf1, ")\n"); fprintf(fp, "%s", buf1); } static int find_save_locs (struct unw_frame_info *info) { struct unw_script *scr; if ((info->ip & (machdep->machspec->unimpl_va_mask | 0xf)) || IS_UVADDR(info->ip, NULL)) { info->rp_loc = 0; return -1; } scr = script_lookup(info); if (!scr) { scr = build_script(info); if (!scr) { error(INFO, "failed to build unwind script for ip %lx\n", info->ip); return -1; } } run_script(scr, info); return 0; } static int unw_unwind (struct unw_frame_info *info) { unsigned long prev_ip, prev_sp, prev_bsp; unsigned long ip, pr, num_regs; int retval; struct bt_info *bt = (struct bt_info *)info->task; prev_ip = info->ip; prev_sp = info->sp; prev_bsp = info->bsp; /* restore the ip */ if (!info->rp_loc) { error(INFO, "unwind: failed to locate return link (ip=0x%lx)!\n", info->ip); return -1; } ip = info->ip = IA64_GET_STACK_ULONG(info->rp_loc); if (ip < GATE_ADDR + PAGE_SIZE) { /* * We don't have unwind info for the gate page, * so we consider that part * of user-space for the purpose of unwinding. 
*/ console("unwind: reached user-space (ip=0x%lx)\n", ip); return -1; } /* restore the cfm: */ if (!info->pfs_loc) { error(INFO, "unwind: failed to locate ar.pfs!\n"); return -1; } info->cfm_loc = info->pfs_loc; /* restore the bsp: */ pr = info->pr; num_regs = 0; if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) { #ifdef UNWIND_V1 if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs)); #endif #ifdef UNWIND_V2 info->pt = info->sp + 16; if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); #endif #ifdef UNWIND_V3 info->pt = info->sp + 16; if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); #endif } else num_regs = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 7) & 0x7f; /* size of locals */ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { error(INFO, "unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", info->bsp, info->regstk.limit, info->regstk.top); return -1; } /* restore the sp: */ info->sp = info->psp; if ((info->sp < info->memstk.top || info->sp > info->memstk.limit) && !ia64_in_init_stack(info->sp)) { error(INFO, "unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n", info->sp, info->memstk.top, info->memstk.limit); return -1; } if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { error(INFO, "unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip); return -1; } /* as we unwind, the saved ar.unat becomes the primary unat: */ info->pri_unat_loc = info->unat_loc; /* finally, restore the predicates: */ unw_get_pr(info, 
&info->pr); retval = find_save_locs(info); return retval; } /* * Apply the unwinding actions represented by OPS and update SR to * reflect the state that existed upon entry to the function that this * unwinder represents. */ static void run_script (struct unw_script *script, struct unw_frame_info *state) { struct unw_insn *ip, *limit, next_insn; unsigned long opc, dst, val, off; unsigned long *s = (unsigned long *) state; struct bt_info *bt = (struct bt_info *)state->task; state->flags = script->flags; ip = script->insn; limit = script->insn + script->count; next_insn = *ip; while (ip++ < limit) { opc = next_insn.opc; dst = next_insn.dst; val = next_insn.val; next_insn = *ip; redo: switch (opc) { case UNW_INSN_ADD: s[dst] += val; break; case UNW_INSN_MOVE2: if (!s[val]) goto lazy_init; s[dst+1] = s[val+1]; s[dst] = s[val]; break; case UNW_INSN_MOVE: if (!s[val]) goto lazy_init; s[dst] = s[val]; break; #if defined(UNWIND_V2) || defined(UNWIND_V3) case UNW_INSN_MOVE_SCRATCH: if (state->pt) { s[dst] = (unsigned long) get_scratch_regs(state) + val; } else { s[dst] = 0; } break; #endif case UNW_INSN_MOVE_STACKED: s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp, val); break; case UNW_INSN_ADD_PSP: s[dst] = state->psp + val; break; case UNW_INSN_ADD_SP: s[dst] = state->sp + val; break; case UNW_INSN_SETNAT_MEMSTK: if (!state->pri_unat_loc) state->pri_unat_loc = &state->sw->ar_unat; /* register off. 
is a multiple of 8, so the least 3 bits (type) are 0 */ s[dst+1] = ((unsigned long)(state->pri_unat_loc) - s[dst]) | UNW_NAT_MEMSTK; break; case UNW_INSN_SETNAT_TYPE: s[dst+1] = val; break; case UNW_INSN_LOAD: #if UNW_DEBUG if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0 || s[val] < TASK_SIZE) { debug(1, "unwind: rejecting bad psp=0x%lx\n", s[val]); break; } #endif s[dst] = IA64_GET_STACK_ULONG(s[val]); break; } } return; lazy_init: off = unw.sw_off[val]; s[val] = (unsigned long) state->sw + off; if (off >= struct_offset(struct switch_stack, r4) && off <= struct_offset(struct switch_stack, r7)) /* * We're initializing a general register: init NaT info, too. Note that * the offset is a multiple of 8 which gives us the 3 bits needed for * the type field. */ s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK; goto redo; } /* * Don't bother with the kernel's script hashing scheme -- we're not worried * about lookup speed. */ static struct unw_script * script_lookup(struct unw_frame_info *info) { int i; struct unw_script *script; unsigned long ip, pr; struct machine_specific *ms; ms = machdep->machspec; ms->script_cache_fills++; ip = info->ip; pr = info->pr; for (i = 0; i < UNW_CACHE_SIZE; i++) { script = &ms->script_cache[i]; if (!script->ip) break; if ((ip == script->ip) && (((pr ^ script->pr_val) & script->pr_mask) == 0)) { ms->script_cache_hits++; return script; } } return NULL; } static struct unw_script * script_new(unsigned long ip) { struct unw_script *script; struct machine_specific *ms; ms = machdep->machspec; script = &ms->script_cache[ms->script_index]; BZERO(script, sizeof(struct unw_script)); ms->script_index++; ms->script_index %= UNW_CACHE_SIZE; script->ip = ip; return script; } static void script_finalize (struct unw_script *script, struct unw_state_record *sr) { script->pr_mask = sr->pr_mask; script->pr_val = sr->pr_val; } static void script_emit(struct unw_script *script, struct unw_insn insn) { if 
(script->count >= UNW_MAX_SCRIPT_LEN) { error(INFO, "unwind: script exceeds maximum size of %u instructions!\n", UNW_MAX_SCRIPT_LEN); return; } script->insn[script->count++] = insn; } static void emit_nat_info(struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; struct unw_insn insn; unsigned long val = 0; switch (r->where) { case UNW_WHERE_GR: if (r->val >= 32) { /* register got spilled to a stacked register */ opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_REGSTK; } else /* register got spilled to a scratch register */ opc = UNW_INSN_SETNAT_MEMSTK; break; case UNW_WHERE_FR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_VAL; break; case UNW_WHERE_BR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_NONE; break; case UNW_WHERE_PSPREL: case UNW_WHERE_SPREL: opc = UNW_INSN_SETNAT_MEMSTK; break; default: error(INFO, "unwind: don't know how to emit nat info for where = %u\n", r->where); return; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); } /* * Build an unwind script that unwinds from state OLD_STATE to the * entrypoint of the function that called OLD_STATE. 
 */

#define UNWIND_INFO_BUFSIZE (3000)  /* absurdly large static buffer that */
                                    /* should avoid need for GETBUF() */

static struct unw_script *
build_script (struct unw_frame_info *info)
{
    const struct unw_table_entry *e = 0;
    struct unw_script *script = 0;
    struct unw_labeled_state *ls, *next;
    unsigned long ip = info->ip;
    struct unw_state_record sr;
    struct unw_table *table;
    struct unw_reg_info *r;
    struct unw_insn insn;
    u8 *dp, *desc_end;
    u64 hdr;
    int i;
    struct unw_table unw_table_buf;
    char unwind_info_buf[UNWIND_INFO_BUFSIZE];
    /* NOTE(review): info->task is overloaded to carry the bt_info here */
    struct bt_info *bt = (struct bt_info *)info->task;

    /* build state record */
    memset(&sr, 0, sizeof(sr));
    for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
        r->when = UNW_WHEN_NEVER;
    sr.pr_val = info->pr;

    script = script_new(ip);
    if (!script) {
        error(INFO, "failed to create a new unwind script\n");
        return 0;
    }

    /*
     * The kernel table is embedded and guaranteed to be the first
     * one on the list.
     */
    table = &unw.kernel_table;
    if (ip >= table->start && ip < table->end)
        e = lookup(table, ip - table->segment_base);

    /*
     * If not found, walk through the module list.
     */
    while (!e && table->next) {
        if (!readmem((ulong)table->next, KVADDR, &unw_table_buf,
            sizeof(struct unw_table), "module unw_table", RETURN_ON_ERROR))
            break;
        table = &unw_table_buf;
        if (ip >= table->start && ip < table->end)
            e = lookup(table, ip - table->segment_base);
    }

    if (!e) {
        /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
        if (CRASHDEBUG(2))
            error(INFO, "unwind: no unwind info for ip %lx\n", ip);
        bt->flags |= BT_UNWIND_ERROR;
        sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
        sr.curr.reg[UNW_REG_RP].when = -1;
        sr.curr.reg[UNW_REG_RP].val = 0;
        compile_reg(&sr, UNW_REG_RP, script);
        script_finalize(script, &sr);
        return script;
    }

    /* instruction slot index of the target within the procedure */
    sr.when_target = (3*((ip & ~0xfUL) -
        (table->segment_base + e->start_offset))/16 + (ip & 0xfUL));

#ifdef REDHAT
    /* read the unwind descriptors out of the dumpfile into a local buffer */
    readmem((ulong)(table->segment_base + e->info_offset), KVADDR,
        unwind_info_buf, UNWIND_INFO_BUFSIZE, "unwind info", FAULT_ON_ERROR);
    hdr = *(u64 *)unwind_info_buf;
    if (((UNW_LENGTH(hdr)*8)+8) > UNWIND_INFO_BUFSIZE)
        error(FATAL,
            "absurdly large unwind_info: %d (redefine UNWIND_INFO_BUFSIZE)\n",
            (UNW_LENGTH(hdr)*8)+8);
    dp = (u8 *)(unwind_info_buf + 8);
    desc_end = dp + 8*UNW_LENGTH(hdr);
#else
    hdr = *(u64 *) (table->segment_base + e->info_offset);
    dp = (u8 *) (table->segment_base + e->info_offset + 8);
    desc_end = dp + 8*UNW_LENGTH(hdr);
#endif

    /* decode descriptors until done or exhausted */
    while (!sr.done && dp < desc_end)
        dp = unw_decode(dp, sr.in_body, &sr);

    if (sr.when_target > sr.epilogue_start) {
        /*
         * sp has been restored and all values on the memory stack below
         * psp also have been restored.
         */
        sr.curr.reg[UNW_REG_PSP].val = 0;
        sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
        sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
        for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
            if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
                || r->where == UNW_WHERE_SPREL) {
                r->val = 0;
                r->where = UNW_WHERE_NONE;
                r->when = UNW_WHEN_NEVER;
            }
    }

    script->flags = sr.flags;

    /*
     * If RP did't get saved, generate entry for the return link
     * register.
*/ if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) { sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; sr.curr.reg[UNW_REG_RP].when = -1; sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; } /* translate state record into unwinder instructions: */ /* * First, set psp if we're dealing with a fixed-size frame; * subsequent instructions may depend on this value. */ if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE) && sr.curr.reg[UNW_REG_PSP].val != 0) { /* new psp is sp plus frame size */ insn.opc = UNW_INSN_ADD; insn.dst = struct_offset(struct unw_frame_info, psp)/8; insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */ script_emit(script, insn); } /* determine where the primary UNaT is: */ if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when) i = UNW_REG_PRI_UNAT_GR; else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else i = UNW_REG_PRI_UNAT_GR; compile_reg(&sr, i, script); for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i) compile_reg(&sr, i, script); /* free labeled register states & stack: */ for (ls = sr.labeled_states; ls; ls = next) { next = ls->next; free_state_stack(&ls->saved_state); free_labeled_state(ls); } free_state_stack(&sr.curr); script_finalize(script, &sr); return script; } static struct unw_table_entry * lookup(struct unw_table *table, unsigned long rel_ip) { struct unw_table_entry *e = 0; unsigned long lo, hi, mid; struct unw_table_entry *array, *loc_array; static struct unw_table_entry e_returned; if (table == &unw.kernel_table) { array = (struct unw_table_entry *)table->array; loc_array = NULL; } else { loc_array = (struct unw_table_entry *) GETBUF(table->length * sizeof(struct unw_table_entry)); if (!readmem((ulong)table->array, KVADDR, loc_array, table->length * sizeof(struct unw_table_entry), "module unw_table_entry array", 
RETURN_ON_ERROR|QUIET)) { if (IS_MODULE_VADDR(table->segment_base + rel_ip)) error(WARNING, "cannot read module unw_table_entry array\n"); return 0; } array = loc_array; } /* do a binary search for right entry: */ for (lo = 0, hi = table->length; lo < hi; ) { mid = (lo + hi) / 2; e = &array[mid]; if (rel_ip < e->start_offset) hi = mid; else if (rel_ip >= e->end_offset) lo = mid + 1; else break; } /* * Return a pointer to a static copy of "e" if found, and * give back the module buffer if used. */ if (e) { BCOPY(e, &e_returned, sizeof(struct unw_table_entry)); e = &e_returned; } if (loc_array) FREEBUF(loc_array); if (rel_ip < e->start_offset || rel_ip >= e->end_offset) return NULL; return e; } static void compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; unsigned long val, rval; struct unw_insn insn; long need_nat_info; if (machdep->flags & UNW_PTREGS) { compile_reg_v2(sr, i, script); return; } if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target) return; opc = UNW_INSN_MOVE; val = rval = r->val; need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7); switch (r->where) { case UNW_WHERE_GR: if (rval >= 32) { opc = UNW_INSN_MOVE_STACKED; val = rval - 32; } else if (rval >= 4 && rval <= 7) { if (need_nat_info) { opc = UNW_INSN_MOVE2; need_nat_info = 0; } val = unw.preg_index[UNW_REG_R4 + (rval - 4)]; } else { opc = UNW_INSN_ADD_SP; val = -SIZE(pt_regs) + pt_regs_off(rval); } break; case UNW_WHERE_FR: if (rval <= 5) val = unw.preg_index[UNW_REG_F2 + (rval - 2)]; else if (rval >= 16 && rval <= 31) val = unw.preg_index[UNW_REG_F16 + (rval - 16)]; else { opc = UNW_INSN_ADD_SP; val = -SIZE(pt_regs); if (rval <= 9) val += struct_offset(struct pt_regs, f6) + 16*(rval - 6); else error(INFO, "unwind: kernel may not touch f%lu\n", rval); } break; case UNW_WHERE_BR: if (rval >= 1 && rval <= 5) val = unw.preg_index[UNW_REG_B1 + (rval - 1)]; else { opc = UNW_INSN_ADD_SP; val = 
-SIZE(pt_regs);
            if (rval == 0)
                val += struct_offset(struct pt_regs, b0);
            else if (rval == 6)
                val += struct_offset(struct pt_regs, b6);
            else
                val += struct_offset(struct pt_regs, b7);
        }
        break;

    case UNW_WHERE_SPREL:
        opc = UNW_INSN_ADD_SP;
        break;

    case UNW_WHERE_PSPREL:
        opc = UNW_INSN_ADD_PSP;
        break;

    default:
        error(INFO,
            "unwind: register %u has unexpected `where' value of %u\n",
            i, r->where);
        break;
    }

    insn.opc = opc;
    insn.dst = unw.preg_index[i];
    insn.val = val;
    script_emit(script, insn);

    if (need_nat_info)
        emit_nat_info(sr, i, script);

    if (i == UNW_REG_PSP) {
        /*
         * info->psp must contain the _value_ of the previous
         * sp, not it's save location. We get this by
         * dereferencing the value we just stored in
         * info->psp:
         */
        insn.opc = UNW_INSN_LOAD;
        insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
        script_emit(script, insn);
    }
}

/*
 * Like compile_reg(), but for kernels whose scratch-register spills
 * use the pt_regs layout (machdep UNW_PTREGS flag); scratch saves
 * become UNW_INSN_MOVE_SCRATCH with a pt_regs field offset.
 */
static void
compile_reg_v2 (struct unw_state_record *sr, int i, struct unw_script *script)
{
    struct unw_reg_info *r = sr->curr.reg + i;
    enum unw_insn_opcode opc;
    unsigned long val, rval;
    struct unw_insn insn;
    long need_nat_info;

    if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
        return;

    opc = UNW_INSN_MOVE;
    val = rval = r->val;
    need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);

    switch (r->where) {
    case UNW_WHERE_GR:
        if (rval >= 32) {
            opc = UNW_INSN_MOVE_STACKED;
            val = rval - 32;
        } else if (rval >= 4 && rval <= 7) {
            if (need_nat_info) {
                opc = UNW_INSN_MOVE2;
                need_nat_info = 0;
            }
            val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
        } else {
            /* register got spilled to a scratch register */
            opc = UNW_INSN_MOVE_SCRATCH;
            val = pt_regs_off(rval);
        }
        break;

    case UNW_WHERE_FR:
        if (rval <= 5)
            val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
        else if (rval >= 16 && rval <= 31)
            val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
        else {
            opc = UNW_INSN_MOVE_SCRATCH;
            if (rval <= 11)
                val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
            else
                error(INFO,
                    "compile_reg: kernel may not touch f%lu\n", rval);
        }
        break;

    case UNW_WHERE_BR:
        if (rval >= 1 && rval <= 5)
            val =
unw.preg_index[UNW_REG_B1 + (rval - 1)];
        else {
            opc = UNW_INSN_MOVE_SCRATCH;
            if (rval == 0)
                val = offsetof(struct pt_regs, b0);
            else if (rval == 6)
                val = offsetof(struct pt_regs, b6);
            else
                val = offsetof(struct pt_regs, b7);
        }
        break;

    case UNW_WHERE_SPREL:
        opc = UNW_INSN_ADD_SP;
        break;

    case UNW_WHERE_PSPREL:
        opc = UNW_INSN_ADD_PSP;
        break;

    default:
        error(INFO,
            "compile_reg: register %u has unexpected `where' value of %u\n",
            i, r->where);
        break;
    }

    insn.opc = opc;
    insn.dst = unw.preg_index[i];
    insn.val = val;
    script_emit(script, insn);

    if (need_nat_info)
        emit_nat_info(sr, i, script);

    if (i == UNW_REG_PSP) {
        /*
         * info->psp must contain the _value_ of the previous
         * sp, not it's save location. We get this by
         * dereferencing the value we just stored in
         * info->psp:
         */
        insn.opc = UNW_INSN_LOAD;
        insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
        script_emit(script, insn);
    }
}
#endif /* IA64 */
crash-7.1.4/cmdline.c0000775000000000000000000017126512634305150013110 0ustar  rootroot/* cmdline.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2015 David Anderson
 * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "defs.h"

static void restore_sanity(void);
static void restore_ifile_sanity(void);
static int pseudo_command(char *);
static void check_special_handling(char *);
static int is_executable_in_PATH(char *);
static int is_shell_script(char *);
static void list_aliases(char *);
static int allocate_alias(int);
static int alias_exists(char *);
static void resolve_aliases(void);
static int setup_redirect(int);
int multiple_pipes(char **);
static int output_command_to_pids(void);
static void set_my_tty(void);
static char *signame(int);
static int setup_stdpipe(void);
static void wait_for_children(ulong);
#define ZOMBIES_ONLY (1)
#define ALL_CHILDREN (2)
int shell_command(char *);
static void modify_orig_line(char *, struct args_input_file *);
static void modify_expression_arg(char *, char **, struct args_input_file *);
static int verify_args_input_file(char *);

#define READLINE_LIBRARY
/*
 * NOTE(review): the three header names below were lost in extraction
 * (angle brackets stripped) -- presumably readline/readline.h,
 * readline/history.h, and a third readline/errno header; confirm
 * against the pristine crash-7.1.4 tarball.
 */
#include
#include
#include

static void readline_init(void);

static struct alias_data alias_head = { 0 };

/*
 * Read one command line from whichever source is active (rc files,
 * -i input file, a tty via readline, or a pipe on stdin), resolve
 * aliases, set up output redirection, and populate the global
 * argcnt/args[] for the command about to run.
 */
void
process_command_line(void)
{
    /*
     * Restore normal environment, clearing out any excess baggage
     * piled up by the previous command.
     */
    restore_sanity();

    fp = stdout;
    BZERO(pc->command_line, BUFSIZE);

    if (!(pc->flags &
        (READLINE|SILENT|CMDLINE_IFILE|RCHOME_IFILE|RCLOCAL_IFILE)))
        fprintf(fp, "%s", pc->prompt);
    fflush(fp);

    /*
     * Input can come from five possible sources:
     *
     *   1. an .rc file located in the user's HOME directory.
     *   2. an .rc file located in the current directory.
     *   3. an input file that was designated by the -i flag at
     *      program invocation.
     *   4. from a terminal.
     *   5. from a pipe, if stdin is a pipe rather than a terminal.
     *
     * But first, handle the interruption of an input file caused
     * by a FATAL error in one of its commands.
* */ if (pc->ifile_in_progress) { switch (pc->ifile_in_progress) { case RCHOME_IFILE: pc->flags |= INIT_IFILE|RCHOME_IFILE; sprintf(pc->command_line, "< %s/.%src", pc->home, pc->program_name); break; case RCLOCAL_IFILE: sprintf(pc->command_line, "< .%src", pc->program_name); pc->flags |= INIT_IFILE|RCLOCAL_IFILE; break; case CMDLINE_IFILE: sprintf(pc->command_line, "< %s", pc->input_file); pc->flags |= INIT_IFILE|CMDLINE_IFILE; break; case RUNTIME_IFILE: sprintf(pc->command_line, "%s", pc->runtime_ifile_cmd); pc->flags |= IFILE_ERROR; break; default: error(FATAL, "invalid input file\n"); } } else if (pc->flags & RCHOME_IFILE) { sprintf(pc->command_line, "< %s/.%src", pc->home, pc->program_name); pc->flags |= INIT_IFILE; } else if (pc->flags & RCLOCAL_IFILE) { sprintf(pc->command_line, "< .%src", pc->program_name); pc->flags |= INIT_IFILE; } else if (pc->flags & CMDLINE_IFILE) { sprintf(pc->command_line, "< %s", pc->input_file); pc->flags |= INIT_IFILE; } else if (pc->flags & TTY) { if (!(pc->readline = readline(pc->prompt))) { args[0] = NULL; fprintf(fp, "\n"); return; } strcpy(pc->command_line, pc->readline); free(pc->readline); clean_line(pc->command_line); pseudo_command(pc->command_line); strcpy(pc->orig_line, pc->command_line); if (strlen(pc->command_line) && !iscntrl(pc->command_line[0])) add_history(pc->command_line); check_special_handling(pc->command_line); } else { if (fgets(pc->command_line, BUFSIZE-1, stdin) == NULL) clean_exit(1); clean_line(pc->command_line); strcpy(pc->orig_line, pc->command_line); } /* * First clean out all linefeeds and leading/trailing spaces. * Then substitute aliases for the real thing they represent. */ clean_line(pc->command_line); resolve_aliases(); /* * Setup output redirection based upon the command line itself or * based upon the default scrolling behavior, if any. 
*/ switch (setup_redirect(FROM_COMMAND_LINE)) { case REDIRECT_NOT_DONE: case REDIRECT_TO_STDPIPE: case REDIRECT_TO_PIPE: case REDIRECT_TO_FILE: break; case REDIRECT_SHELL_ESCAPE: case REDIRECT_SHELL_COMMAND: case REDIRECT_FAILURE: RESTART(); break; } /* * Setup the global argcnt and args[] array for use by everybody * during the life of this command. */ argcnt = parse_line(pc->command_line, args); } /* * Allow input file redirection without having to put a space between * the < and the filename. Allow the "pointer-to" asterisk to "touch" * the structure/union name. */ static void check_special_handling(char *s) { char local[BUFSIZE]; strcpy(local, s); if ((local[0] == '*') && (!whitespace(local[1]))) { sprintf(s, "* %s", &local[1]); return; } if ((local[0] == '<') && (!whitespace(local[1]))) { sprintf(s, "< %s", &local[1]); return; } } static int is_executable_in_PATH(char *filename) { char *buf1, *buf2; char *tok, *path; int retval; if ((path = getenv("PATH"))) { buf1 = GETBUF(strlen(path)+1); buf2 = GETBUF(strlen(path)+1); strcpy(buf2, path); } else return FALSE; retval = FALSE; tok = strtok(buf2, ":"); while (tok) { sprintf(buf1, "%s/%s", tok, filename); if (file_exists(buf1, NULL) && (access(buf1, X_OK) == 0)) { retval = TRUE; break; } tok = strtok(NULL, ":"); } FREEBUF(buf1); FREEBUF(buf2); return retval; } /* * At this point the only pseudo commands are the "r" (repeat) and * the "h" (history) command: * * 1. an "r" alone, or "!!" along, just means repeat the last command. * 2. an "r" followed by a number, means repeat that command from the * history table. * 3. an "!" followed by a number that is not the name of a command * in the user's PATH, means repeat that command from the history table. * 4. an "r" followed by one or more non-decimal characters means to * seek back until a line-beginning match is found. * 5. an "h" alone, or a string beginning with "hi", means history. 
 */
static int
pseudo_command(char *input)
{
    int i;
    HIST_ENTRY *entry;
    int idx, found;
    char *p;

    clean_line(input);

    /*
     * Just dump all commands that have been entered to date.
     */
    if (STREQ(input, "h") || STRNEQ(input, "hi")) {
        dump_history();
        pc->command_line[0] = NULLCHAR;
        return TRUE;
    }

    /* repeat the most recent command */
    if (STREQ(input, "r") || STREQ(input, "!!")) {
        if (!history_offset)
            error(FATAL, "no commands entered!\n");
        entry = history_get(history_offset);
        strcpy(input, entry->line);
        fprintf(fp, "%s%s\n", pc->prompt, input);
        return TRUE;
    }

    /* "r<number>": repeat history entry <number> */
    if ((input[0] == 'r') && decimal(&input[1], 0)) {
        if (!history_offset)
            error(FATAL, "no commands entered!\n");
        p = &input[1];
        goto rerun;
    }

    /* "!<number>": history repeat, unless <number> is a PATH command */
    if ((input[0] == '!') && decimal(&input[1], 0) &&
        !is_executable_in_PATH(first_nonspace(&input[1]))) {
        p = first_nonspace(&input[1]);
        goto rerun;
    }

    if (STRNEQ(input, "r ")) {
        if (!history_offset)
            error(FATAL, "no commands entered!\n");
        p = first_nonspace(&input[1]);
rerun:
        if (decimal(p, 0)) {
            idx = atoi(p);
            if (idx == 0)
                goto invalid_repeat_request;
            if (idx > history_offset)
                error(FATAL, "command %d not entered yet!\n", idx);
            entry = history_get(idx);
            strcpy(input, entry->line);
            fprintf(fp, "%s%s\n", pc->prompt, input);
            return TRUE;
        }

        /* non-decimal: search backwards for a line-beginning match */
        idx = -1;
        found = FALSE;
        for (i = history_offset; i > 0; i--) {
            entry = history_get(i);
            if (STRNEQ(entry->line, p)) {
                found = TRUE;
                break;
            }
        }
        if (found) {
            strcpy(input, entry->line);
            fprintf(fp, "%s%s\n", pc->prompt, input);
            return TRUE;
        }
invalid_repeat_request:
        fprintf(fp, "invalid repeat request: %s\n", input);
        strcpy(input, "");
        return TRUE;
    }

    return FALSE;
}

/*
 * Dump the history table in first-to-last chronological order.
 */
void
dump_history(void)
{
    int i;
    HIST_ENTRY **the_history;
    HIST_ENTRY *entry;

    if (!history_offset)
        error(FATAL, "no commands entered!\n");

    the_history = history_list();

    for (i = 0; i < history_offset; i++) {
        entry = the_history[i];
        fprintf(fp, "[%d] %s\n", i+1, entry->line);
    }
}

/*
 * Pager arguments.
 */
static char *less_argv[5] = {
    "/usr/bin/less",
    "-E",
    "-X",
    "-Ps -- MORE --  forward\\: <SPACE> or j  backward\\: b or k  quit\\: q",
    NULL
};

static char *more_argv[2] = {
    "/bin/more",
    NULL
};

static char **CRASHPAGER_argv = NULL;

/*
 * Validate and cache the $CRASHPAGER command, rejecting anything with
 * shell redirection characters; returns TRUE if usable.
 */
int
CRASHPAGER_valid(void)
{
    int i, c;
    char *env, *CRASHPAGER_buf;
    char *arglist[MAXARGS];

    if (CRASHPAGER_argv)
        return TRUE;

    if (!(env = getenv("CRASHPAGER")))
        return FALSE;

    if (strstr(env, "|") || strstr(env, "<") || strstr(env, ">")) {
        error(INFO,
            "CRASHPAGER ignored: contains invalid character: \"%s\"\n",
            env);
        return FALSE;
    }

    if ((CRASHPAGER_buf = (char *)malloc(strlen(env)+1)) == NULL)
        return FALSE;

    strcpy(CRASHPAGER_buf, env);

    if (!(c = parse_line(CRASHPAGER_buf, arglist)) ||
        !file_exists(arglist[0], NULL) || access(arglist[0], X_OK) ||
        !(CRASHPAGER_argv = (char **)malloc(sizeof(char *) * (c+1)))) {
        free(CRASHPAGER_buf);
        if (strlen(env))
            error(INFO, "CRASHPAGER ignored: \"%s\"\n", env);
        return FALSE;
    }

    for (i = 0; i < c; i++)
        CRASHPAGER_argv[i] = arglist[i];
    CRASHPAGER_argv[i] = NULL;

    return TRUE;
}

/*
 * Set up a command string buffer for error/help output.
 */
char *
setup_scroll_command(void)
{
    char *buf;
    long i, len;

    if (!(pc->flags & SCROLL))
        return NULL;

    switch (pc->scroll_command)
    {
    case SCROLL_LESS:
        buf = GETBUF(strlen(less_argv[0])+1);
        strcpy(buf, less_argv[0]);
        break;
    case SCROLL_MORE:
        buf = GETBUF(strlen(more_argv[0])+1);
        strcpy(buf, more_argv[0]);
        break;
    case SCROLL_CRASHPAGER:
        for (i = len = 0; CRASHPAGER_argv[i]; i++)
            len += strlen(CRASHPAGER_argv[i])+1;
        /* NOTE(review): assumes GETBUF returns zeroed memory (the
           strlen(buf) below reads it before any write) -- confirm */
        buf = GETBUF(len);
        for (i = 0; CRASHPAGER_argv[i]; i++) {
            sprintf(&buf[strlen(buf)], "%s%s",
                i ? " " : "", CRASHPAGER_argv[i]);
        }
        break;
    default:
        return NULL;
    }

    return buf;
}

/*
 * Parse the command line for pipe or redirect characters:
 *
 *   1. if a "|" character is found, popen() what comes after it, and
 *      modify the contents of the global "fp" FILE pointer.
 *   2.
if one or two ">" characters are found, fopen() the filename that
 *      follows, and modify the contents of the global "fp" FILE pointer.
 *
 * Care is taken to segregate:
 *
 *   1. expressions encompassed by parentheses, or
 *   2. strings encompassed by single or double quotation marks
 *
 * When either of the above are in affect, no redirection is done.
 *
 * Lastly, if no redirection is requested by the user on the command line,
 * output is passed to the default scrolling command, which is popen()'d
 * and again, the contents of the global "fp" FILE pointer is modified.
 * This default behavior is not performed if the command is coming from
 * an input file, nor if scrolling has been turned off.
 */
static int
setup_redirect(int origin)
{
    char *p, which;
    int append;
    int expression;
    int string;
    int ret ATTRIBUTE_UNUSED;
    FILE *pipe;
    FILE *ofile;

    pc->redirect = origin;
    pc->eoc_index = 0;

    p = pc->command_line;

    /* a lone "|" or "!" escapes to an interactive shell */
    if (STREQ(p, "|") || STREQ(p, "!")) {
        ret = system("/bin/sh");
        pc->redirect |= REDIRECT_SHELL_ESCAPE;
        return REDIRECT_SHELL_ESCAPE;
    }

    if (FIRSTCHAR(p) == '|' || FIRSTCHAR(p) == '!')
        pc->redirect |= REDIRECT_SHELL_COMMAND;

    expression = 0;
    string = FALSE;

    while (*p) {
        /* track nesting so |, ! and > inside (...) or quotes are ignored */
        if (*p == '(')
            expression++;
        if (*p == ')')
            expression--;
        if ((*p == '"') || (*p == '\''))
            string = !string;

        if (!(expression || string) && ((*p == '|') || (*p == '!'))) {
            which = *p;
            *p = NULLCHAR;
            pc->eoc_index = p - pc->command_line;
            p++;
            p = strip_beginning_whitespace(p);
            if (!strlen(p)) {
                error(INFO, "no shell command after '%c'\n", which);
                pc->redirect |= REDIRECT_FAILURE;
                return REDIRECT_FAILURE;
            }

            if (LASTCHAR(p) == '|')
                error(FATAL_RESTART, "pipe to nowhere?\n");

            if (pc->redirect & REDIRECT_SHELL_COMMAND)
                return shell_command(p);

            if ((pipe = popen(p, "w")) == NULL) {
                error(INFO, "cannot open pipe\n");
                pc->redirect |= REDIRECT_FAILURE;
                return REDIRECT_FAILURE;
            }
            setbuf(pipe, NULL);

            switch (origin)
            {
            case FROM_COMMAND_LINE:
                fp = pc->pipe = pipe;
                break;
            case FROM_INPUT_FILE:
                fp = pc->ifile_pipe = pipe;
                break;
            }

            if (multiple_pipes(&p))
                pc->redirect |= REDIRECT_MULTI_PIPE;

            /* remember just the command name of the (last) pipe target */
            strcpy(pc->pipe_command, p);
            null_first_space(pc->pipe_command);
            pc->redirect |= REDIRECT_TO_PIPE;

            if (!(pc->redirect & REDIRECT_SHELL_COMMAND)) {
                if ((pc->pipe_pid = output_command_to_pids()))
                    pc->redirect |= REDIRECT_PID_KNOWN;
                else
                    error(FATAL_RESTART, "pipe operation failed\n");
            }

            return REDIRECT_TO_PIPE;
        }

        /* ">" or ">>" (but not "->", i.e. a pointer dereference) */
        if (!(expression || string) && (*p == '>') &&
            !((p > pc->command_line) && (*(p-1) == '-'))) {
            append = FALSE;
            *p = NULLCHAR;
            pc->eoc_index = p - pc->command_line;
            if (*(p+1) == '>') {
                append = TRUE;
                *p = NULLCHAR;
                p++;
            }
            p++;
            p = strip_beginning_whitespace(p);
            if (!strlen(p)) {
                error(INFO, "no file name after %s\n", append ? ">>" : ">");
                pc->redirect |= REDIRECT_FAILURE;
                return REDIRECT_FAILURE;
            }

            /* after an ifile error, re-open in append mode */
            if (pc->flags & IFILE_ERROR)
                append = TRUE;

            if ((ofile = fopen(p, append ? "a+" : "w+")) == NULL) {
                error(INFO, "unable to open %s\n", p);
                pc->redirect = REDIRECT_FAILURE;
                return REDIRECT_FAILURE;
            }
            setbuf(ofile, NULL);

            switch (origin)
            {
            case FROM_COMMAND_LINE:
                fp = pc->ofile = ofile;
                break;
            case FROM_INPUT_FILE:
                fp = pc->ifile_ofile = ofile;
                break;
            }

            pc->redirect |= REDIRECT_TO_FILE;
            return REDIRECT_TO_FILE;
        }

        p++;
    }

    /* no explicit redirection: fall back to the default scroller */
    if ((origin == FROM_COMMAND_LINE) && (pc->flags & TTY) &&
        (pc->flags & SCROLL) && pc->scroll_command) {

        if (!strlen(pc->command_line) ||
            STREQ(pc->command_line, "q") ||
            STREQ(pc->command_line, "Q") ||
            STREQ(pc->command_line, "exit") ||
            STRNEQ(pc->command_line, "<")) {
            pc->redirect |= REDIRECT_NOT_DONE;
            return REDIRECT_NOT_DONE;
        }

        if (!setup_stdpipe()) {
            error(INFO, "cannot open pipe\n");
            pc->redirect |= REDIRECT_FAILURE;
            return REDIRECT_FAILURE;
        }
        fp = pc->stdpipe;
        pc->redirect |= REDIRECT_TO_STDPIPE;
        switch (pc->scroll_command)
        {
        case SCROLL_LESS:
            strcpy(pc->pipe_command, less_argv[0]);
            break;
        case SCROLL_MORE:
            strcpy(pc->pipe_command, more_argv[0]);
            break;
        case SCROLL_CRASHPAGER:
            strcpy(pc->pipe_command, CRASHPAGER_argv[0]);
            break;
        }
        return REDIRECT_TO_STDPIPE;
    }

    pc->redirect |= REDIRECT_NOT_DONE;
    return REDIRECT_NOT_DONE;
}

/*
 * Find the last command in an input line that possibly contains
 * multiple pipes.
 */
int
multiple_pipes(char **input)
{
    char *p, *found;
    int quote;

    found = NULL;
    quote = FALSE;

    for (p = *input; *p; p++) {
        if ((*p == '\'') || (*p == '"')) {
            quote = !quote;
            continue;
        } else if (quote)
            continue;

        if (*p == '|') {
            if (STRNEQ(p, "||"))
                break;
            found = first_nonspace(p+1);
        }
    }

    if (found) {
        *input = found;
        return TRUE;
    } else
        return FALSE;
}

/*
 * Dump the current pc->redirect flag state to the console (debug aid).
 */
void
debug_redirect(char *s)
{
    int others;
    int alive;

    others = 0;
    console("%s: (", s);
    if (pc->redirect & FROM_COMMAND_LINE)
        console("%sFROM_COMMAND_LINE", others++ ? "|" : "");
    if (pc->redirect & FROM_INPUT_FILE)
        console("%sFROM_INPUT_FILE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_NOT_DONE)
        console("%sREDIRECT_NOT_DONE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_TO_PIPE)
        console("%sREDIRECT_TO_PIPE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_TO_STDPIPE)
        console("%sREDIRECT_TO_STDPIPE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_TO_FILE)
        console("%sREDIRECT_TO_FILE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_FAILURE)
        console("%sREDIRECT_FAILURE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_SHELL_ESCAPE)
        console("%sREDIRECT_SHELL_ESCAPE", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_SHELL_COMMAND)
        console("%sREDIRECT_SHELL_COMMAND", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_PID_KNOWN)
        console("%sREDIRECT_PID_KNOWN", others++ ? "|" : "");
    if (pc->redirect & REDIRECT_MULTI_PIPE)
        console("%sREDIRECT_MULTI_PIPE", others++ ? "|" : "");
    console(")\n");

    if (pc->pipe_pid || strlen(pc->pipe_command)) {
        if (pc->pipe_pid && PID_ALIVE(pc->pipe_pid))
            alive = TRUE;
        else
            alive = FALSE;

        console("pipe_pid: %d (%s) pipe_command: %s\n",
            pc->pipe_pid, alive ? "alive" : "dead",
            pc->pipe_command);
    }
}

/*
 * Determine whether the pid receiving the current piped output is still
 * alive.
 *
 * NOTE: This routine returns TRUE by default, and only returns FALSE if
 * the pipe_pid exists *and* it's known to have died.  Therefore the
 * caller must be cognizant of pc->pipe_pid or pc->stdpipe_pid.
 */
int
output_open(void)
{
    int waitstatus, waitret;

    if (!(pc->flags & TTY))
        return TRUE;

    switch (pc->redirect & PIPE_OPTIONS)
    {
    case (REDIRECT_TO_STDPIPE|FROM_COMMAND_LINE):
        waitret = waitpid(pc->stdpipe_pid, &waitstatus, WNOHANG);
        if ((waitret == pc->stdpipe_pid) || (waitret == -1))
            return FALSE;
        break;

    case (REDIRECT_TO_PIPE|FROM_INPUT_FILE):
        if (pc->curcmd_flags & REPEAT)
            break;
        /* FALLTHROUGH */
    case (REDIRECT_TO_PIPE|FROM_COMMAND_LINE):
        switch (pc->redirect & (REDIRECT_MULTI_PIPE))
        {
        case REDIRECT_MULTI_PIPE:
            if (!PID_ALIVE(pc->pipe_pid))
                return FALSE;
            break;

        default:
            waitret = waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
            if (waitret == pc->pipe_pid)
                return FALSE;
            if (waitret == -1) {
                /* intervening sh */
                if (!PID_ALIVE(pc->pipe_pid))
                    return FALSE;
            }
            break;
        }
        break;

    default:
        break;
    }

    return TRUE;
}

/*
 * Determine the pids of the current popen'd shell and output command.
 * This is all done using /proc; the ps kludge at the bottom of this
 * routine is legacy, and should only get executed if /proc doesn't exist.
 */
static int
output_command_to_pids(void)
{
    DIR *dirp;
    struct dirent *dp;
    FILE *stp;
    char buf1[BUFSIZE];
    char buf2[BUFSIZE];
    char lookfor[BUFSIZE];
    char *pid, *name, *status, *p_pid, *pgrp;
    char *arglist[MAXARGS];
    int argc;
    FILE *pipe;
    int retries, shell_has_exited;

    retries = 0;
    shell_has_exited = FALSE;
    pc->pipe_pid = pc->pipe_shell_pid = 0;
    /* /proc/<pid>/stat shows the command name in parentheses */
    sprintf(lookfor, "(%s)", pc->pipe_command);
    stall(1000);
retry:
    if (is_directory("/proc") && (dirp = opendir("/proc"))) {
        for (dp = readdir(dirp); dp && !pc->pipe_pid;
             dp = readdir(dirp)) {
            if (!decimal(dp->d_name, 0))
                continue;
            sprintf(buf1, "/proc/%s/stat", dp->d_name);
            if (file_exists(buf1, NULL) && (stp = fopen(buf1, "r"))) {
                if (fgets(buf2, BUFSIZE, stp)) {
                    pid = strtok(buf2, " ");
                    name = strtok(NULL, " ");
                    status = strtok(NULL, " ");
                    p_pid = strtok(NULL, " ");
                    pgrp = strtok(NULL, " ");
                    /* the intervening popen'd sh, if any */
                    if (STREQ(name, "(sh)") &&
                        (atoi(p_pid) == getpid())) {
                        pc->pipe_shell_pid = atoi(pid);
                        if (STREQ(status, "Z"))
                            shell_has_exited = TRUE;
                    }
                    if (STREQ(name, lookfor) &&
                        ((atoi(p_pid) == getpid()) ||
                         (atoi(p_pid) == pc->pipe_shell_pid) ||
                         (atoi(pgrp) == getpid()))) {
                        pc->pipe_pid = atoi(pid);
                        console(
                            "FOUND[%d] (%d->%d->%d) %s %s p_pid: %s pgrp: %s\n",
                            retries, getpid(), pc->pipe_shell_pid,
                            pc->pipe_pid, name, status, p_pid, pgrp);
                    }
                }
                fclose(stp);
            }
        }
        closedir(dirp);
    }

    /* the child may not have shown up in /proc yet; retry briefly */
    if (!pc->pipe_pid && !shell_has_exited &&
        ((retries++ < 10) || pc->pipe_shell_pid)) {
        stall(1000);
        goto retry;
    }

    console("getpid: %d pipe_shell_pid: %d pipe_pid: %d\n",
        getpid(), pc->pipe_shell_pid, pc->pipe_pid);

    if (pc->pipe_pid)
        return pc->pipe_pid;

    /* legacy fallback: scrape "ps" output for our tty */
    sprintf(buf1, "ps -ft %s", pc->my_tty);
    console("%s: ", buf1);

    if ((pipe = popen(buf1, "r")) == NULL) {
        error(INFO, "cannot determine output pid\n");
        return 0;
    }

    while (fgets(buf1, BUFSIZE, pipe)) {
        argc = parse_line(buf1, arglist);
        if ((argc >= 8) && STREQ(arglist[7], pc->pipe_command) &&
            STRNEQ(pc->my_tty, arglist[5])) {
            pc->pipe_pid = atoi(arglist[1]);
            break;
        }
    }
    pclose(pipe);

    console("%d\n", pc->pipe_pid);

    return pc->pipe_pid;
}

/*
 * Close straggling, piped-to, output commands.
 */
void
close_output(void)
{
    if ((pc->flags & TTY) &&
        (pc->pipe_pid || strlen(pc->pipe_command)) && output_open())
        kill(pc->pipe_pid, 9);
}

/*
 * Initialize what's needed for the command line:
 *
 *  1. termios structures for raw and cooked terminal mode.
 *  2. set up SIGINT and SIGPIPE handlers for aborted commands.
 *  3. set up the command history table.
 *  4. create the prompt string.
 */
void
cmdline_init(void)
{
    int fd = 0;

    /*
     * Stash a copy of the original termios setup.
     * Build a raw version for quick use for each command entry.
     */
    if (isatty(fileno(stdin)) &&
        ((fd = open("/dev/tty", O_RDONLY)) >= 0)) {
        if (tcgetattr(fd, &pc->termios_orig) == -1)
            error(FATAL, "tcgetattr /dev/tty: %s\n", strerror(errno));

        if (tcgetattr(fd, &pc->termios_raw) == -1)
            error(FATAL, "tcgetattr /dev/tty: %s\n", strerror(errno));

        close(fd);

        pc->termios_raw.c_lflag &= ~ECHO & ~ICANON;
        pc->termios_raw.c_cc[VMIN] = (char)1;
        pc->termios_raw.c_cc[VTIME] = (char)0;

        restore_sanity();
        pc->flags |= TTY;
        set_my_tty();

        SIGACTION(SIGINT, restart, &pc->sigaction, NULL);

        readline_init();
    } else {
        if (fd < 0)
            error(INFO, "/dev/tty: %s\n", strerror(errno));
        if (!(pc->flags & SILENT))
            fprintf(fp, "NOTE: stdin: not a tty\n\n");
        fflush(fp);
        pc->flags &= ~TTY;
    }

    SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL);

    set_command_prompt(NULL);
}

/*
 * Create and stash the original prompt, but allow changes during runtime.
 */
void
set_command_prompt(char *new_prompt)
{
    static char *orig_prompt = NULL;

    if (!orig_prompt) {
        if (!(orig_prompt = (char *)malloc(strlen(pc->program_name)+3)))
            error(FATAL, "cannot malloc prompt string\n");
        sprintf(orig_prompt, "%s> ", pc->program_name);
    }

    if (new_prompt)
        pc->prompt = new_prompt;
    else
        pc->prompt = orig_prompt;
}

/*
 * SIGINT, SIGPIPE, and SIGSEGV handler.
 * Signal number 0 is sent for a generic restart.
 */
#define MAX_RECURSIVE_SIGNALS (10)
#define MAX_SIGINTS_ACCEPTED  (1)

void
restart(int sig)
{
    static int in_restart = 0;

    console("restart (%s) %s\n", signame(sig),
        pc->flags & IN_GDB ? "(in gdb)" : "(in crash)");

    if (sig == SIGUSR2)
        clean_exit(1);

    /* guard against signals arriving while already restarting */
    if (pc->flags & IN_RESTART) {
        fprintf(stderr,
            "\nembedded signal received (%s): recursive restart call\n",
            signame(sig));
        if (++in_restart < MAX_RECURSIVE_SIGNALS)
            return;
        fprintf(stderr, "bailing out...\n");
        clean_exit(1);
    } else {
        pc->flags |= IN_RESTART;
        in_restart = 0;
    }

    switch (sig)
    {
    case SIGSEGV:
        fflush(fp);
        /*
         * NOTE(review): this format string takes an argument but has
         * no conversion -- the original message text (presumably in
         * angle brackets) was lost in extraction; restore from the
         * pristine source.
         */
        fprintf(stderr, " \n",
            pc->flags & IN_GDB ? " in gdb" : "");
        /* FALLTHROUGH into generic restart handling */
    case 0:
    case SIGPIPE:
        restore_sanity();
        break;

    case SIGINT:
        SIGACTION(SIGINT, restart, &pc->sigaction, NULL);
        pc->flags |= _SIGINT_;
        pc->sigint_cnt++;
        pc->flags &= ~IN_RESTART;
        /* first SIGINT aborts the command; more are ignored until then */
        if (pc->sigint_cnt == MAX_SIGINTS_ACCEPTED) {
            restore_sanity();
            if (pc->ifile_in_progress) {
                pc->ifile_in_progress = 0;
                pc->ifile_offset = 0;
            }
            break;
        }
        return;

    default:
        fprintf(stderr, "unexpected signal received: %s\n",
            signame(sig));
        restore_sanity();
        close_output();
        break;
    }

    fprintf(stderr, "\n");

    pc->flags &= ~(IN_FOREACH|IN_GDB|IN_RESTART);
    longjmp(pc->main_loop_env, 1);
}

/*
 * Return a signal name string, or a number if the signal is not listed.
 */
static char *
signame(int sig)
{
    static char sigbuf[20];

    switch (sig)
    {
    case SIGINT:
        sprintf(sigbuf, "SIGINT-%d", pc->sigint_cnt+1);
        return sigbuf;
    case SIGPIPE:
        return "SIGPIPE";
    case SIGSEGV:
        return "SIGSEGV";
    default:
        sprintf(sigbuf, "%d", sig);
        return sigbuf;
    }
}

/*
 * Restore the program environment to the state it was in before the
 * last command was executed:
 *
 *  1. close all temporarily opened pipes and output files.
 *  2. set the terminal back to normal cooked mode.
 *  3. free all temporary buffers.
 *  4. restore the last known output radix.
 */
static void
restore_sanity(void)
{
    int fd, waitstatus;
    struct extension_table *ext;
    struct command_table_entry *cp;

    /* reap the default scroller pipe, if open */
    if (pc->stdpipe) {
        close(fileno(pc->stdpipe));
        pc->stdpipe = NULL;
        if (pc->stdpipe_pid && PID_ALIVE(pc->stdpipe_pid)) {
            while (!waitpid(pc->stdpipe_pid, &waitstatus, WNOHANG))
                stall(1000);
        }
        pc->stdpipe_pid = 0;
    }
    /* reap a user-requested "| command" pipe and its shell */
    if (pc->pipe) {
        close(fileno(pc->pipe));
        pc->pipe = NULL;
        console("wait for redirect %d->%d to finish...\n",
            pc->pipe_shell_pid, pc->pipe_pid);
        if (pc->pipe_pid)
            while (PID_ALIVE(pc->pipe_pid)) {
                waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
                stall(1000);
            }
        if (pc->pipe_shell_pid)
            while (PID_ALIVE(pc->pipe_shell_pid)) {
                waitpid(pc->pipe_shell_pid, &waitstatus, WNOHANG);
                stall(1000);
            }
        pc->pipe_pid = 0;
    }
    /* same, for a pipe set up from within an input file */
    if (pc->ifile_pipe) {
        fflush(pc->ifile_pipe);
        close(fileno(pc->ifile_pipe));
        pc->ifile_pipe = NULL;
        if (pc->pipe_pid &&
            ((pc->redirect & (PIPE_OPTIONS|REDIRECT_PID_KNOWN)) ==
            (FROM_INPUT_FILE|REDIRECT_TO_PIPE|REDIRECT_PID_KNOWN))) {
            console("wait for redirect %d->%d to finish...\n",
                pc->pipe_shell_pid, pc->pipe_pid);
            while (PID_ALIVE(pc->pipe_pid)) {
                waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
                stall(1000);
            }
            if (pc->pipe_shell_pid)
                while (PID_ALIVE(pc->pipe_shell_pid)) {
                    waitpid(pc->pipe_shell_pid, &waitstatus, WNOHANG);
                    stall(1000);
                }
            if (pc->redirect & (REDIRECT_MULTI_PIPE))
                wait_for_children(ALL_CHILDREN);
        }
    }

    if (pc->ofile) {
        fclose(pc->ofile);
        pc->ofile = NULL;
    }
    if (pc->ifile_ofile) {
        fclose(pc->ifile_ofile);
        pc->ifile_ofile = NULL;
    }
    if (pc->ifile) {
        fclose(pc->ifile);
        pc->ifile = NULL;
    }
    if (pc->args_ifile) {
        fclose(pc->args_ifile);
        pc->args_ifile = NULL;
    }
    if (pc->tmpfile)
        close_tmpfile();
    if (pc->tmpfile2)
        close_tmpfile2();
    if (pc->cmd_cleanup)
        pc->cmd_cleanup(pc->cmd_cleanup_arg);

    /* return the terminal to cooked mode */
    if (pc->flags & TTY) {
        if ((fd = open("/dev/tty", O_RDONLY)) < 0) {
            console("/dev/tty: %s\n", strerror(errno));
            clean_exit(1);
        }
        if (tcsetattr(fd, TCSANOW, &pc->termios_orig) == -1)
            error(FATAL, "tcsetattr /dev/tty: %s\n", strerror(errno));
        close(fd);
    }

    wait_for_children(ZOMBIES_ONLY);

    pc->flags &= ~(INIT_IFILE|RUNTIME_IFILE|IFILE_ERROR|_SIGINT_|PLEASE_WAIT);
    pc->sigint_cnt = 0;
    pc->redirect = 0;
    pc->pipe_command[0] = NULLCHAR;
    pc->pipe_pid = 0;
    pc->pipe_shell_pid = 0;
    pc->sbrk = sbrk(0);

    if ((pc->curcmd_flags & (UD2A_INSTRUCTION|BAD_INSTRUCTION)) ==
        (UD2A_INSTRUCTION|BAD_INSTRUCTION))
        error(WARNING,
            "A (bad) instruction was noted in last disassembly.\n"
            "         Use \"dis -b [number]\" to set/restore the number of\n"
            "         encoded bytes to skip after a ud2a (BUG) instruction.\n");
    pc->curcmd_flags = 0;
    pc->curcmd_private = 0;

    restore_gdb_sanity();

    free_all_bufs();

    /*
     * Clear the structure cache references -- no-ops if DUMPFILE().
     */
    clear_task_cache();
    clear_machdep_cache();
    clear_swap_info_cache();
    clear_file_cache();
    clear_dentry_cache();
    clear_inode_cache();
    clear_vma_cache();
    clear_active_set();

    if (kt->ikconfig_flags & IKCONFIG_LOADED)
        read_in_kernel_config(IKCFG_FREE);

    /*
     * Call the cleanup() function of any extension.
     */
    for (ext = extension_table; ext; ext = ext->next) {
        for (cp = ext->command_table; cp->name; cp++) {
            if (cp->flags & CLEANUP)
                (*cp->func)();
        }
    }

    if (CRASHDEBUG(5)) {
        dump_filesys_table(0);
        dump_vma_cache(0);
        dump_text_value_cache(0);
    }

    if (REMOTE())
        remote_clear_pipeline();

    hq_close();
}

/*
 * Similar to above, but only called in between each command that is
 * read from an input file.
*/ static void restore_ifile_sanity(void) { int fd; pc->flags &= ~IFILE_ERROR; if (pc->ifile_pipe) { close(fileno(pc->ifile_pipe)); pc->ifile_pipe = NULL; } if (pc->ifile_ofile) { fclose(pc->ifile_ofile); pc->ifile_ofile = NULL; } if (pc->flags & TTY) { if ((fd = open("/dev/tty", O_RDONLY)) < 0) { console("/dev/tty: %s\n", strerror(errno)); clean_exit(1); } if (tcsetattr(fd, TCSANOW, &pc->termios_orig) == -1) error(FATAL, "tcsetattr /dev/tty: %s\n", strerror(errno)); close(fd); } if (pc->tmpfile2) { close_tmpfile2(); } restore_gdb_sanity(); free_all_bufs(); hq_close(); } /* * Check whether a SIGINT was received during the execution of a command, * clearing the flag if it was set. This allows individual commands or * entities to do whatever is appropriate to handle CTRL-C. */ int received_SIGINT(void) { if (pc->flags & _SIGINT_) { pc->flags &= ~_SIGINT_; pc->sigint_cnt = 0; if (pc->ifile_in_progress) { pc->ifile_in_progress = 0; pc->ifile_offset = 0; } return TRUE; } else return FALSE; } /* * Look for an executable file that begins with #! */ static int is_shell_script(char *s) { int fd; char interp[2]; struct stat sbuf; if ((fd = open(s, O_RDONLY)) < 0) return FALSE; if (isatty(fd)) return FALSE; if (read(fd, interp, 2) != 2) { close(fd); return FALSE; } if (!STRNEQ(interp, "#!")) { close(fd); return FALSE; } close(fd); if (stat(s, &sbuf) == -1) return FALSE; if (!(sbuf.st_mode & (S_IXUSR|S_IXGRP|S_IXOTH))) return FALSE; return TRUE; } /* * After verifying the user's input file, loop through each line, executing * one command at a time. This command pretty much does the same as * get_command_line(), but also kicks off the command execution as well. * It's kept self-contained, as indicated by the RUNTIME_IFILE flag, and * keeps its own internal sanity by calling restore_ifile_sanity() between * each line. */ void exec_input_file(void) { char *file; FILE *incoming_fp; char buf[BUFSIZE]; ulong this; /* * Do start-up .rc or input files in the proper order. 
*/ if (pc->flags & RCHOME_IFILE) { this = RCHOME_IFILE; pc->flags &= ~RCHOME_IFILE; } else if (pc->flags & RCLOCAL_IFILE) { this = RCLOCAL_IFILE; pc->flags &= ~RCLOCAL_IFILE; } else if (pc->flags & CMDLINE_IFILE) { this = CMDLINE_IFILE; pc->flags &= ~CMDLINE_IFILE; } else this = 0; if (pc->flags & RUNTIME_IFILE) { error(INFO, "embedded input files not allowed!\n"); return; } if (argcnt < 2) { error(INFO, "no input file entered!\n"); return; } else file = args[1]; if (!file_exists(file, NULL)) { error(INFO, "%s: %s\n", file, strerror(ENOENT)); return; } if (is_elf_file(file)) { error(INFO, "input from executable files not supported yet!\n"); return; } if (is_shell_script(file)) { error(INFO, "input from shell scripts not supported yet!\n"); return; } if ((pc->ifile = fopen(file, "r")) == NULL) { error(INFO, "%s: %s\n", file, strerror(errno)); return; } pc->flags |= RUNTIME_IFILE; incoming_fp = fp; /* * Handle runtime commands that use input files. */ if ((pc->ifile_in_progress = this) == 0) { if (!pc->runtime_ifile_cmd) { if (!(pc->runtime_ifile_cmd = (char *)malloc(BUFSIZE))) { error(INFO, "cannot malloc input file command line buffer\n"); return; } BZERO(pc->runtime_ifile_cmd, BUFSIZE); } if (!strlen(pc->runtime_ifile_cmd)) strcpy(pc->runtime_ifile_cmd, pc->orig_line); pc->ifile_in_progress = RUNTIME_IFILE; } /* * If there's an offset, then there was a FATAL error caused * by the last command executed from the input file. */ if (pc->ifile_offset) fseek(pc->ifile, (long)pc->ifile_offset, SEEK_SET); while (fgets(buf, BUFSIZE-1, pc->ifile)) { /* * Restore normal environment. 
*/ fp = incoming_fp; restore_ifile_sanity(); BZERO(pc->command_line, BUFSIZE); BZERO(pc->orig_line, BUFSIZE); if (this & (RCHOME_IFILE|RCLOCAL_IFILE)) pc->curcmd_flags |= FROM_RCFILE; pc->ifile_offset = ftell(pc->ifile); if (STRNEQ(buf, "#") || STREQ(buf, "\n")) continue; check_special_handling(buf); strcpy(pc->command_line, buf); clean_line(pc->command_line); strcpy(pc->orig_line, pc->command_line); strip_linefeeds(pc->orig_line); resolve_aliases(); switch (setup_redirect(FROM_INPUT_FILE)) { case REDIRECT_NOT_DONE: case REDIRECT_TO_PIPE: case REDIRECT_TO_FILE: break; case REDIRECT_SHELL_ESCAPE: case REDIRECT_SHELL_COMMAND: continue; case REDIRECT_FAILURE: goto done_input; } if (CRASHDEBUG(1)) console(buf); if (!(argcnt = parse_line(pc->command_line, args))) continue; if (!(pc->flags & SILENT)) { fprintf(fp, "%s%s", pc->prompt, buf); fflush(fp); } exec_command(); if (received_SIGINT()) goto done_input; } done_input: fclose(pc->ifile); pc->ifile = NULL; pc->flags &= ~RUNTIME_IFILE; pc->ifile_offset = 0; if (pc->runtime_ifile_cmd) BZERO(pc->runtime_ifile_cmd, BUFSIZE); pc->ifile_in_progress = 0; } /* * Prime the alias list with a few built-in's. */ void alias_init(char *inbuf) { char buf[BUFSIZE]; if (inbuf) { strcpy(buf, inbuf); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); return; } strcpy(buf, "alias man help"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias ? 
help"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias quit q"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias sf set scroll off"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias sn set scroll on"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias hex set radix 16"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias dec set radix 10"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias g gdb"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias px p -x"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias pd p -d"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias for foreach"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias size *"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias dmesg log"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias lsmod mod"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); } /* * Before the command line is parsed, take a snapshot and parse the snapshot. * If args[0] is an known alias, recreate the pc->command_line string with * the alias substitution. 
*/ static void resolve_aliases(void) { int i; struct alias_data *ad; int found; char *p1, *remainder; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!strlen(pc->command_line)) return; strcpy(buf1, pc->command_line); argcnt = parse_line(buf1, args); if (argcnt > 1) { strcpy(buf2, &pc->command_line[args[1] - buf1]); remainder = buf2; } else remainder = NULL; found = FALSE; for (ad = alias_head.next; ad; ad = ad->next) { if (STREQ(ad->alias, args[0])) { for (i = 0; i < ad->argcnt; i++) args[i] = ad->args[i]; found = TRUE; break; } } if (!found) return; BZERO(pc->command_line, BUFSIZE); p1 = pc->command_line; for (i = 0; i < ad->argcnt; i++) { snprintf(p1, BUFSIZE - (p1-pc->command_line), "%s ", args[i]); while (*p1) p1++; if ((p1 - pc->command_line) >= BUFSIZE) break; } if (remainder) { if ((strlen(remainder)+strlen(pc->command_line)) < BUFSIZE) strcat(pc->command_line, remainder); else error(INFO, "command line overflow.\n"); } else if (strlen(pc->command_line) >= (BUFSIZE-1)) error(INFO, "command line overflow.\n"); clean_line(pc->command_line); } /* * If input string is an alias, return a pointer to the alias_data struct. */ struct alias_data * is_alias(char *s) { struct alias_data *ad; for (ad = alias_head.next; ad; ad = ad->next) { if (STREQ(ad->alias, s)) return(ad); } return NULL; } /* * .rc file commands that are "set" commands may be performed prior * to initialization, so pass them to cmd_set() for consideration. * All other commands are flagged for execution by exec_input_file() * after session initialization is complete. */ void resolve_rc_cmd(char *s, int origin) { clean_line(s); if (*s == '#') return; if ((argcnt = parse_line(s, args)) == 0) return; if (STREQ(args[0], "set")) { optind = 0; cmd_set(); } switch (origin) { case ALIAS_RCHOME: pc->flags |= RCHOME_IFILE; break; case ALIAS_RCLOCAL: pc->flags |= RCLOCAL_IFILE; break; } return; } /* * The "alias" command. With no arguments, list all aliases. 
With one * argument -- which must be an alias -- display the string it's aliased to. * With two or more arguments, setup a new alias, where the first argument * is the alias, and the remaining arguments make up the alias string. * If the second arg is the NULL string "", delete the alias. */ void cmd_alias(void) { if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); switch (argcnt) { case 1: list_aliases(NULL); break; case 2: list_aliases(args[1]); break; default: if (allocate_alias(ALIAS_RUNTIME)) list_aliases(args[1]); break; } } /* * Dump the current set of aliases. */ static void list_aliases(char *s) { int i; struct alias_data *ad; int found, precision; char buf[BUFSIZE]; if (!alias_head.next) { error(INFO, "alias list is empty\n"); return; } BZERO(buf, BUFSIZE); found = FALSE; precision = 7; for (ad = alias_head.next; ad; ad = ad->next) { switch (ad->origin) { case ALIAS_RCLOCAL: sprintf(buf, ".%src", pc->program_name); if (strlen(buf) > precision) precision = strlen(buf); break; case ALIAS_RCHOME: sprintf(buf, "$HOME/.%src", pc->program_name); if (strlen(buf) > precision) precision = strlen(buf); break; } } fprintf(fp, "ORIGIN"); pad_line(fp, precision-6, ' '); BZERO(buf, BUFSIZE); fprintf(fp, " ALIAS COMMAND\n"); for (ad = alias_head.next; ad; ad = ad->next) { if (s && !STREQ(s, ad->alias)) continue; found = TRUE; switch (ad->origin) { case ALIAS_RUNTIME: sprintf(buf, "runtime"); break; case ALIAS_RCLOCAL: sprintf(buf, ".%src", pc->program_name); break; case ALIAS_RCHOME: sprintf(buf, "$HOME/.%src", pc->program_name); break; case ALIAS_BUILTIN: sprintf(buf, "builtin"); break; } fprintf(fp, "%s ", buf); pad_line(fp, precision-strlen(buf), ' '); fprintf(fp, "%-7s ", ad->alias); for (i = 0; i < ad->argcnt; i++) { fprintf(fp, "%s ", ad->args[i]); } fprintf(fp, "\n"); } if (s && !found) fprintf(fp, "alias does not exist: %s\n", s); } /* * Verify the alias request set up in the args[] array: * * 1. make sure that the alias string starts with a legitimate command. * 2. 
if the already exists, deallocate its current version. * * Then malloc space for the alias string, and link it in to the alias list. */ static int allocate_alias(int origin) { int i; int size; struct alias_data *ad; struct alias_data *newad; char *p1, *enclosed_string; int found; if ((enclosed_string = strstr(args[2], " "))) *enclosed_string = NULLCHAR; found = FALSE; if (get_command_table_entry(args[1])) { error(INFO, "cannot alias existing command name: %s\n", args[1]); return FALSE; } if (get_command_table_entry(args[2])) found = TRUE; if (!found) { if (!strlen(args[2])) { if (alias_exists(args[1])) { deallocate_alias(args[1]); fprintf(fp, "alias deleted: %s\n", args[1]); } } else { error(INFO, "invalid alias attempt on non-existent command: %s\n", args[2]); } return FALSE; } if (alias_exists(args[1])) deallocate_alias(args[1]); if (enclosed_string) *enclosed_string = ' '; size = sizeof(struct alias_data) + argcnt; for (i = 0; i < argcnt; i++) size += strlen(args[i]); if ((newad = (struct alias_data *)malloc(size+1)) == NULL) { error(INFO, "alias_data malloc: %s\n", strerror(errno)); return FALSE; } BZERO(newad, size); newad->next = NULL; newad->size = size; newad->origin = origin; p1 = newad->argbuf; for (i = 1; i < argcnt; i++) { sprintf(p1, "%s ", args[i]); while (*p1) p1++; } p1 = strstr(newad->argbuf, " "); *p1 = NULLCHAR; newad->alias = newad->argbuf; newad->argcnt = parse_line(p1+1, newad->args); for (ad = &alias_head; ad->next; ad = ad->next) ; ad->next = newad; return TRUE; } /* * Check whether the passed-in string is a currently-existing alias. */ static int alias_exists(char *s) { struct alias_data *ad; if (!alias_head.next) return FALSE; for (ad = alias_head.next; ad; ad = ad->next) if (STREQ(ad->alias, s)) return TRUE; return FALSE; } /* * If the passed-in string is an alias, delink it and free its memory. 
*/ void deallocate_alias(char *s) { struct alias_data *ad, *lastad; for (ad = alias_head.next, lastad = &alias_head; ad; ad = ad->next) { if (!STREQ(ad->alias, s)) { lastad = ad; continue; } lastad->next = ad->next; free(ad); break; } } /* * "help -a" output */ void dump_alias_data(void) { int i; struct alias_data *ad; fprintf(fp, "alias_head.next: %lx\n\n", (ulong)alias_head.next); for (ad = alias_head.next; ad; ad = ad->next) { fprintf(fp, " next: %lx\n", (ulong)ad->next); fprintf(fp, " alias: %s\n", ad->alias); fprintf(fp, " size: %d\n", ad->size); fprintf(fp, " origin: "); switch (ad->origin) { case ALIAS_RUNTIME: fprintf(fp, "runtime setting \n"); break; case ALIAS_RCLOCAL: fprintf(fp, ".%src \n", pc->program_name); break; case ALIAS_RCHOME: fprintf(fp, "$HOME/.%src \n", pc->program_name); break; case ALIAS_BUILTIN: fprintf(fp, "builtin\n"); break; } fprintf(fp, " argcnt: %d\n", ad->argcnt); for (i = 0; i < ad->argcnt; i++) fprintf(fp, " args[%d]: %lx: %s\n", i, (ulong)ad->args[i], ad->args[i]); fprintf(fp, "\n"); } } /* * Repeat a command on a live system. 
*/ void cmd_repeat(void) { ulong delay; char buf[BUFSIZE]; char bufsave[BUFSIZE]; FILE *incoming_fp; if (argcnt == 1) cmd_usage(pc->curcmd, SYNOPSIS); delay = 0; if (args[1][0] == '-') { switch (args[1][1]) { default: case NULLCHAR: cmd_usage(pc->curcmd, SYNOPSIS); case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': delay = dtol(&args[1][1], FAULT_ON_ERROR, NULL); concat_args(buf, 2, FALSE); break; } } else concat_args(buf, 1, FALSE); check_special_handling(buf); strcpy(pc->command_line, buf); resolve_aliases(); if (!argcnt) return; strcpy(buf, pc->command_line); strcpy(bufsave, buf); argcnt = parse_line(buf, args); if (!argcnt) return; if (STREQ(args[0], "<") && (pc->flags & TTY) && (pc->flags & SCROLL) && pc->scroll_command) error(FATAL, "scrolling must be turned off when repeating an input file\n"); pc->curcmd_flags |= REPEAT; incoming_fp = fp; while (TRUE) { optind = 0; fp = incoming_fp; exec_command(); free_all_bufs(); wait_for_children(ZOMBIES_ONLY); if (received_SIGINT() || !output_open()) break; if ((pc->flags & TTY) && !is_a_tty("/dev/tty")) break; if (!(pc->curcmd_flags & REPEAT)) break; if (delay) sleep(delay); strcpy(buf, bufsave); argcnt = parse_line(buf, args); } } /* * Initialize readline, set the editing mode, and then perform any * crash-specific bindings, etc. 
*/ static void readline_init(void) { rl_initialize(); if (STREQ(pc->editing_mode, "vi")) { rl_editing_mode = vi_mode; rl_bind_key(CTRL('N'), rl_get_next_history); rl_bind_key(CTRL('P'), rl_get_previous_history); rl_bind_key_in_map(CTRL('P'), rl_get_previous_history, vi_insertion_keymap); rl_bind_key_in_map(CTRL('N'), rl_get_next_history, vi_insertion_keymap); rl_bind_key_in_map(CTRL('l'), rl_clear_screen, vi_insertion_keymap); rl_generic_bind(ISFUNC, "[A", (char *)rl_get_previous_history, vi_movement_keymap); rl_generic_bind(ISFUNC, "[B", (char *)rl_get_next_history, vi_movement_keymap); } if (STREQ(pc->editing_mode, "emacs")) { rl_editing_mode = emacs_mode; } } /* * Find and set the tty string of this session as seen in "ps -ef" output. */ static void set_my_tty(void) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int argc; FILE *pipe; strcpy(pc->my_tty, "?"); if (file_exists("/usr/bin/tty", NULL)) { sprintf(buf, "/usr/bin/tty"); if ((pipe = popen(buf, "r")) == NULL) return; while (fgets(buf, BUFSIZE, pipe)) { if (STRNEQ(buf, "/dev/")) { strcpy(pc->my_tty, strip_line_end(&buf[strlen("/dev/")])); break; } } pclose(pipe); return; } sprintf(buf, "ps -ef | grep ' %d '", getpid()); if (CRASHDEBUG(1)) fprintf(fp, "popen(%s)\n", buf); if ((pipe = popen(buf, "r")) == NULL) return; while (fgets(buf, BUFSIZE, pipe)) { argc = parse_line(buf, arglist); if ((argc >= 8) && (atoi(arglist[1]) == getpid())) { if (strlen(arglist[5]) < 9) strcpy(pc->my_tty, arglist[5]); else strncpy(pc->my_tty, arglist[5], 9); } } pclose(pipe); } /* * Check whether SIGINT's are allowed before shipping a request off to gdb. */ int interruptible(void) { if (!(pc->flags & RUNTIME)) return FALSE; if (!(pc->flags & TTY)) return FALSE; if ((pc->redirect & (FROM_INPUT_FILE|REDIRECT_NOT_DONE)) == (FROM_INPUT_FILE|REDIRECT_NOT_DONE)) return TRUE; if (strlen(pc->pipe_command)) return FALSE; return TRUE; } /* * Set up the standard output pipe using whichever was selected during init. 
*/ static int setup_stdpipe(void) { char *path; if (pipe(pc->pipefd) < 0) { error(INFO, "pipe system call failed: %s", strerror(errno)); return FALSE; } if ((pc->stdpipe_pid = fork()) < 0) { error(INFO, "fork system call failed: %s", strerror(errno)); return FALSE; } path = NULL; if (pc->stdpipe_pid > 0) { pc->redirect |= REDIRECT_PID_KNOWN; close(pc->pipefd[0]); /* parent closes read end */ if ((pc->stdpipe = fdopen(pc->pipefd[1], "w")) == NULL) { error(INFO, "fdopen system call failed: %s", strerror(errno)); return FALSE; } setbuf(pc->stdpipe, NULL); switch (pc->scroll_command) { case SCROLL_LESS: strcpy(pc->pipe_command, less_argv[0]); break; case SCROLL_MORE: strcpy(pc->pipe_command, more_argv[0]); break; case SCROLL_CRASHPAGER: strcpy(pc->pipe_command, CRASHPAGER_argv[0]); break; } if (CRASHDEBUG(2)) console("pipe: %lx\n", pc->stdpipe); return TRUE;; } else { close(pc->pipefd[1]); /* child closes write end */ if (dup2(pc->pipefd[0], 0) != 0) { perror("child dup2 failed"); clean_exit(1); } if (CRASHDEBUG(2)) console("execv: %d\n", getpid()); switch (pc->scroll_command) { case SCROLL_LESS: path = less_argv[0]; execv(path, less_argv); break; case SCROLL_MORE: path = more_argv[0]; execv(path, more_argv); break; case SCROLL_CRASHPAGER: path = CRASHPAGER_argv[0]; execv(path, CRASHPAGER_argv); break; } perror(path); fprintf(stderr, "execv of scroll command failed\n"); exit(1); } } static void wait_for_children(ulong waitflag) { int status, pid; while (TRUE) { switch (pid = waitpid(-1, &status, WNOHANG)) { case 0: if (CRASHDEBUG(2)) console("wait_for_children: child running...\n"); if (waitflag == ZOMBIES_ONLY) return; break; case -1: if (CRASHDEBUG(2)) console("wait_for_children: no children alive\n"); return; default: console("wait_for_children(%d): reaped %d\n", waitflag, pid); if (CRASHDEBUG(2)) fprintf(fp, "wait_for_children: reaped %d\n", pid); break; } stall(1000); } } /* * Run an escaped shell command, redirecting the output to * the current output file. 
*/
int
shell_command(char *cmd)
{
    FILE *pipe;
    char buf[BUFSIZE];

    if ((pipe = popen(cmd, "r")) == NULL) {
        error(INFO, "cannot open pipe: %s\n", cmd);
        pc->redirect &= ~REDIRECT_SHELL_COMMAND;
        pc->redirect |= REDIRECT_FAILURE;
        return REDIRECT_FAILURE;
    }

    /* Copy the command's output to the current output file. */
    while (fgets(buf, BUFSIZE, pipe))
        fputs(buf, fp);
    pclose(pipe);

    return REDIRECT_SHELL_COMMAND;
}

/*
 * Check that a candidate argument input file is a readable, non-empty
 * regular file.  Diagnostics only appear at debug level 1.
 */
static int
verify_args_input_file(char *fileptr)
{
    struct stat stat;

    if (!file_exists(fileptr, &stat)) {
        if (CRASHDEBUG(1))
            error(INFO, "%s: no such file\n", fileptr);
    } else if (!S_ISREG(stat.st_mode)) {
        if (CRASHDEBUG(1))
            error(INFO, "%s: not a regular file\n", fileptr);
    } else if (!stat.st_size) {
        if (CRASHDEBUG(1))
            error(INFO, "%s: file is empty\n", fileptr);
    } else if (!file_readable(fileptr)) {
        if (CRASHDEBUG(1))
            error(INFO, "%s: permission denied\n", fileptr);
    } else
        return TRUE;

    return FALSE;
}

/*
 * Verify a command line argument input file.
 */
#define NON_FILENAME_CHARS "*?!|\'\"{}<>;,^()$~"

/*
 * Determine whether the current command line uses a "< file" argument
 * input file, filling in *aif with where the file reference occurs
 * (plain argument, inside a gdb command line, or inside an expression).
 * Returns TRUE if a single valid input file was found.
 */
int
is_args_input_file(struct command_table_entry *ct, struct args_input_file *aif)
{
    int c, start, whites, args_used;
    char *p1, *p2, *curptr, *fileptr;
    char buf[BUFSIZE];
    int retval;

    if (pc->curcmd_flags & NO_MODIFY)
        return FALSE;

    BZERO(aif, sizeof(struct args_input_file));
    retval = FALSE;

    /* gdb commands: scan the raw line, since gdb parses its own args. */
    if (STREQ(ct->name, "gdb")) {
        curptr = pc->orig_line;
next_gdb:
        /* Find the next "<" that is not part of a "<<" shift. */
        if ((p1 = strstr(curptr, "<"))) {
            while (STRNEQ(p1, "<<")) {
                p2 = p1+2;
                if (!(p1 = strstr(p2, "<")))
                    return retval;
            }
        }
        if (!p1)
            return retval;

        start = p1 - curptr;
        p2 = p1+1;
        for (whites = 0; whitespace(*p2); whites++)
            p2++;
        if (*p2 == NULLCHAR)
            return retval;

        strcpy(buf, p2);
        p2 = buf;
        if (*p2) {
            /* Isolate the filename token following the "<". */
            fileptr = p2;
            while (*p2 && !whitespace(*p2) &&
                (strpbrk(p2, NON_FILENAME_CHARS) != p2))
                p2++;
            *p2 = NULLCHAR;
            if (verify_args_input_file(fileptr)) {
                if (retval == TRUE) {
                    error(INFO,
                        "ignoring multiple argument input files: "
                        "%s and %s\n", aif->fileptr, fileptr);
                    return FALSE;
                }
                aif->start = start;
                aif->resume = start + (p2-buf) + whites + 1;
                aif->fileptr = GETBUF(strlen(fileptr)+1);
                strcpy(aif->fileptr, fileptr);
                aif->is_gdb_cmd = TRUE;
                retval = TRUE;
            }
        }
        curptr = p1+1;
        goto next_gdb;
    }

    /* Non-gdb commands: look at each parsed argument. */
    for (c = 0; c < argcnt; c++) {
        if (STRNEQ(args[c], "<") && !STRNEQ(args[c], "<<")) {
            if (strlen(args[c]) > 1) {
                /* "<file" as a single token */
                fileptr = &args[c][1];
                args_used = 1;
            } else {
                /* "<" followed by a separate filename token */
                if ((c+1) == argcnt)
                    error(FATAL, "< requires a file argument\n");
                fileptr = args[c+1];
                args_used = 2;
            }

            if (!verify_args_input_file(fileptr))
                continue;

            if (retval == TRUE)
                error(FATAL,
                    "multiple input files are not supported\n");

            aif->index = c;
            aif->fileptr = GETBUF(strlen(fileptr)+1);
            strcpy(aif->fileptr, fileptr);
            aif->args_used = args_used;
            retval = TRUE;
            continue;
        }

        /* A "(...)" expression argument may embed "< file" itself. */
        if (STRNEQ(args[c], "(")) {
            curptr = args[c];
next_expr:
            if ((p1 = strstr(curptr, "<"))) {
                while (STRNEQ(p1, "<<")) {
                    p2 = p1+2;
                    if (!(p1 = strstr(p2, "<")))
                        continue;
                }
            }
            if (!p1)
                continue;

            start = p1 - curptr;
            p2 = p1+1;
            for (whites = 0; whitespace(*p2); whites++)
                p2++;
            if (*p2 == NULLCHAR)
                continue;

            strcpy(buf, p2);
            p2 = buf;
            if (*p2) {
                fileptr = p2;
                while (*p2 && !whitespace(*p2) &&
                    (strpbrk(p2, NON_FILENAME_CHARS) != p2))
                    p2++;
                *p2 = NULLCHAR;
                if (!verify_args_input_file(fileptr))
                    continue;
                if (retval == TRUE) {
                    error(INFO,
                        "ignoring multiple argument input files: "
                        "%s and %s\n", aif->fileptr, fileptr);
                    return FALSE;
                }
                retval = TRUE;
                aif->in_expression = TRUE;
                aif->args_used = 1;
                aif->index = c;
                aif->start = start;
                aif->resume = start + (p2-buf) + whites + 1;
                aif->fileptr = GETBUF(strlen(fileptr)+1);
                strcpy(aif->fileptr, fileptr);
            }
            curptr = p1+1;
            goto next_expr;
        }
    }

    return retval;
}

/*
 * Splice one line from the args input file into pc->orig_line, replacing
 * the "< file" span recorded in *aif.
 */
static void
modify_orig_line(char *inbuf, struct args_input_file *aif)
{
    char buf[BUFSIZE];

    strcpy(buf, pc->orig_line);
    strcpy(&buf[aif->start], inbuf);
    strcat(buf, &pc->orig_line[aif->resume]);

    strcpy(pc->orig_line, buf);
}

/*
 * Same splice, but applied to a single expression argument rather than
 * the whole original line.  The replacement arg is GETBUF-allocated.
 */
static void
modify_expression_arg(char *inbuf, char **aif_args, struct args_input_file *aif)
{
    char *old, *new;

    old = aif_args[aif->index];
    new = GETBUF(strlen(aif_args[aif->index]) + strlen(inbuf));

    strcpy(new, old);
    strcpy(&new[aif->start], inbuf);
    strcat(new, &old[aif->resume]);

    aif_args[aif->index] = new;
}

/*
 * Sequence through an args input file, and for each line,
 * reinitialize the global args[] and argcnt, and issue the command.
 */
void
exec_args_input_file(struct command_table_entry *ct, struct args_input_file *aif)
{
    char buf[BUFSIZE];
    int i, c, aif_cnt;
    int orig_argcnt;
    char *aif_args[MAXARGS];
    char *new_args[MAXARGS];
    char *orig_args[MAXARGS];
    char orig_line[BUFSIZE];
    char *save_args[MAXARGS];
    char save_line[BUFSIZE];

    if ((pc->args_ifile = fopen(aif->fileptr, "r")) == NULL)
        error(FATAL, "%s: %s\n", aif->fileptr, strerror(errno));

    if (aif->is_gdb_cmd)
        strcpy(orig_line, pc->orig_line);

    BCOPY(args, orig_args, sizeof(args));
    orig_argcnt = argcnt;

    /*
     * Commands cannot be trusted to leave the arguments intact.
     * Stash them here and restore them each time through the loop.
     * (save_args[] entries are packed back-to-back into save_line.)
     */
    save_args[0] = save_line;
    for (i = 0; i < orig_argcnt; i++) {
        strcpy(save_args[i], orig_args[i]);
        save_args[i+1] = save_args[i] + strlen(save_args[i]) + 2;
    }

    while (fgets(buf, BUFSIZE-1, pc->args_ifile)) {
        clean_line(buf);
        /* Skip blank lines and comments. */
        if ((strlen(buf) == 0) || (buf[0] == '#'))
            continue;

        for (i = 1; i < orig_argcnt; i++)
            strcpy(orig_args[i], save_args[i]);

        if (aif->is_gdb_cmd) {
            console("(gdb) before: [%s]\n", orig_line);
            strcpy(pc->orig_line, orig_line);
            modify_orig_line(buf, aif);
            console("(gdb) after: [%s]\n", pc->orig_line);
        } else if (aif->in_expression) {
            console("expr before: [%s]\n", orig_args[aif->index]);
            BCOPY(orig_args, aif_args, sizeof(aif_args));
            modify_expression_arg(buf, aif_args, aif);
            BCOPY(aif_args, args, sizeof(aif_args));
            console("expr after: [%s]\n", args[aif->index]);
        } else {
            /*
             * Rebuild args[]: original args up to the "<",
             * then this line's tokens, then the remainder.
             */
            if (!(aif_cnt = parse_line(buf, aif_args)))
                continue;

            for (i = 0; i < orig_argcnt; i++)
                console("%s[%d]:%s %s",
                    (i == 0) ? "before: " : "", i, orig_args[i],
                    (i+1) == orig_argcnt ? "\n" : "");

            for (i = 0; i < aif->index; i++)
                new_args[i] = orig_args[i];
            for (i = aif->index, c = 0; c < aif_cnt; c++, i++)
                new_args[i] = aif_args[c];
            for (i = aif->index + aif_cnt,
                 c = aif->index + aif->args_used;
                 c < orig_argcnt; c++, i++)
                new_args[i] = orig_args[c];
            argcnt = orig_argcnt - aif->args_used + aif_cnt;
            new_args[argcnt] = NULL;
            BCOPY(new_args, args, sizeof(args));

            for (i = 0; i < argcnt; i++)
                console("%s[%d]:%s %s",
                    (i == 0) ? " after: " : "", i, args[i],
                    (i+1) == argcnt ? "\n" : "");
        }

        optind = argerrs = 0;
        pc->cmdgencur++;

        /* Run the command; a foreach-style longjmp aborts this line. */
        if (setjmp(pc->foreach_loop_env))
            pc->flags &= ~IN_FOREACH;
        else {
            pc->flags |= IN_FOREACH;
            (*ct->func)();
            pc->flags &= ~IN_FOREACH;
        }

        if (pc->cmd_cleanup)
            pc->cmd_cleanup(pc->cmd_cleanup_arg);

        free_all_bufs();

        if (received_SIGINT())
            break;
    }

    fclose(pc->args_ifile);
    pc->args_ifile = NULL;
}
crash-7.1.4/xen_hyper.c0000664000000000000000000017312312634305150013476 0ustar rootroot/*
 * xen_hyper.c
 *
 * Portions Copyright (C) 2006-2007 Fujitsu Limited
 * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K.
 *
 * Authors: Itsuro Oda
 *          Fumihiko Kakuma
 *
 * This file is part of Xencrash.
 *
 * Xencrash is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Xencrash is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xencrash; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "defs.h"

#ifdef XEN_HYPERVISOR_ARCH
#include "xen_hyper_defs.h"

static void xen_hyper_schedule_init(void);

/*
 * Do initialization for Xen Hyper system here.
*/
void
xen_hyper_init(void)
{
    char *buf;
#if defined(X86) || defined(X86_64)
    long member_offset;
#endif

#ifdef X86_64
    xht->xen_virt_start = symbol_value("start");
    /*
     * Xen virtual mapping is aligned to 1 GiB boundary.
     * Image starts no more than 1 GiB below
     * beginning of virtual address space.
     */
    xht->xen_virt_start &= 0xffffffffc0000000;
#endif

    if (machine_type("X86_64") &&
        symbol_exists("xen_phys_start") && !xen_phys_start())
        error(WARNING,
            "This hypervisor is relocatable; if initialization fails below, try\n"
            " using the \"--xen_phys_start\" command line option.\n\n");

    /* Identify the panicking pcpu, if the symbol exists. */
    if (symbol_exists("crashing_cpu")) {
        get_symbol_data("crashing_cpu", sizeof(xht->crashing_cpu),
            &xht->crashing_cpu);
    } else {
        xht->crashing_cpu = XEN_HYPER_PCPU_ID_INVALID;
    }
    machdep->get_smp_cpus();
    machdep->memory_size();

    /* SMP hypervisor: cache the per-cpu offset table. */
    if (symbol_exists("__per_cpu_offset")) {
        xht->flags |= XEN_HYPER_SMP;
        if((xht->__per_cpu_offset = malloc(sizeof(ulong) *
            XEN_HYPER_MAX_CPUS())) == NULL) {
            error(FATAL, "cannot malloc __per_cpu_offset space.\n");
        }
        if (!readmem(symbol_value("__per_cpu_offset"), KVADDR,
            xht->__per_cpu_offset,
            sizeof(ulong) * XEN_HYPER_MAX_CPUS(),
            "__per_cpu_offset", RETURN_ON_ERROR)) {
            error(FATAL, "cannot read __per_cpu_offset.\n");
        }
    }

#if defined(X86) || defined(X86_64)
    /* Per-cpu shift: read the symbol, else infer from the Xen version. */
    if (symbol_exists("__per_cpu_shift")) {
        xht->percpu_shift = (int)symbol_value("__per_cpu_shift");
    } else if (xen_major_version() >= 3 && xen_minor_version() >= 3) {
        xht->percpu_shift = 13;
    } else {
        xht->percpu_shift = 12;
    }
    /* Extract the machine name from the crashing cpu's cpuinfo. */
    member_offset = MEMBER_OFFSET("cpuinfo_x86", "x86_model_id");
    buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_x86));
    if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
        xen_hyper_x86_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
    } else {
        xen_hyper_x86_fill_cpu_data(xht->cpu_idxs[0], buf);
    }
    strncpy(xht->utsname.machine, (char *)(buf + member_offset),
        sizeof(xht->utsname.machine)-1);
    FREEBUF(buf);
#elif defined(IA64)
    buf = GETBUF(XEN_HYPER_SIZE(cpuinfo_ia64));
    if (xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) {
        xen_hyper_ia64_fill_cpu_data(XEN_HYPER_CRASHING_CPU(), buf);
    } else {
        xen_hyper_ia64_fill_cpu_data(xht->cpu_idxs[0], buf);
    }
    strncpy(xht->utsname.machine,
        (char *)(buf + XEN_HYPER_OFFSET(cpuinfo_ia64_vendor)),
        sizeof(xht->utsname.machine)-1);
    FREEBUF(buf);
#endif

#ifndef IA64
    /* Cache sizes/offsets of the crash note structures. */
    XEN_HYPER_STRUCT_SIZE_INIT(note_buf_t, "note_buf_t");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_note_t, "crash_note_t");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_core, "crash_note_t", "core");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen, "crash_note_t", "xen");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_regs, "crash_note_t", "xen_regs");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_t_xen_info, "crash_note_t", "xen_info");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_note_core_t, "crash_note_core_t");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_note, "crash_note_core_t", "note");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_core_t_desc, "crash_note_core_t", "desc");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_t, "crash_note_xen_t");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_note, "crash_note_xen_t", "note");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_t_desc, "crash_note_xen_t", "desc");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_core_t, "crash_note_xen_core_t");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_note, "crash_note_xen_core_t", "note");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_core_t_desc, "crash_note_xen_core_t", "desc");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_note_xen_info_t, "crash_note_xen_info_t");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_note, "crash_note_xen_info_t", "note");
    XEN_HYPER_MEMBER_OFFSET_INIT(crash_note_xen_info_t_desc, "crash_note_xen_info_t", "desc");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_core_t, "crash_xen_core_t");
    XEN_HYPER_STRUCT_SIZE_INIT(crash_xen_info_t, "crash_xen_info_t");
    XEN_HYPER_STRUCT_SIZE_INIT(xen_crash_xen_regs_t, "xen_crash_xen_regs_t");

    /* ELF_Prstatus layout, used when decoding per-cpu crash notes. */
    XEN_HYPER_STRUCT_SIZE_INIT(ELF_Prstatus,"ELF_Prstatus");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_info, "ELF_Prstatus", "pr_info");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cursig, "ELF_Prstatus", "pr_cursig");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sigpend, "ELF_Prstatus", "pr_sigpend");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sighold, "ELF_Prstatus", "pr_sighold");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pid, "ELF_Prstatus", "pr_pid");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_ppid, "ELF_Prstatus", "pr_ppid");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_pgrp, "ELF_Prstatus", "pr_pgrp");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_sid, "ELF_Prstatus", "pr_sid");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_utime, "ELF_Prstatus", "pr_utime");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_stime, "ELF_Prstatus", "pr_stime");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cutime, "ELF_Prstatus", "pr_cutime");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_cstime, "ELF_Prstatus", "pr_cstime");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_reg, "ELF_Prstatus", "pr_reg");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Prstatus_pr_fpvalid, "ELF_Prstatus", "pr_fpvalid");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_sec, "ELF_Timeval", "tv_sec");
    XEN_HYPER_MEMBER_OFFSET_INIT(ELF_Timeval_tv_usec, "ELF_Timeval", "tv_usec");
    XEN_HYPER_STRUCT_SIZE_INIT(ELF_Signifo,"ELF_Signifo");
    XEN_HYPER_STRUCT_SIZE_INIT(ELF_Gregset,"ELF_Gregset");
    XEN_HYPER_STRUCT_SIZE_INIT(ELF_Timeval,"ELF_Timeval");
#endif

    XEN_HYPER_STRUCT_SIZE_INIT(domain, "domain");
    XEN_HYPER_STRUCT_SIZE_INIT(vcpu, "vcpu");
#ifndef IA64
    XEN_HYPER_STRUCT_SIZE_INIT(cpu_info, "cpu_info");
#endif
    XEN_HYPER_STRUCT_SIZE_INIT(cpu_user_regs, "cpu_user_regs");

    /* Read the idle_vcpu[] pointer array from the hypervisor. */
    xht->idle_vcpu_size = get_array_length("idle_vcpu", NULL, 0);
    xht->idle_vcpu_array = (ulong *)malloc(xht->idle_vcpu_size *
        sizeof(ulong));
    if (xht->idle_vcpu_array == NULL) {
        error(FATAL, "cannot malloc idle_vcpu_array space.\n");
    }
    if (!readmem(symbol_value("idle_vcpu"), KVADDR, xht->idle_vcpu_array,
        xht->idle_vcpu_size * sizeof(ulong), "idle_vcpu_array",
        RETURN_ON_ERROR)) {
        error(FATAL, "cannot read idle_vcpu array.\n");
    }

    /*
     * Do some initialization.
     */
#ifndef IA64
    xen_hyper_dumpinfo_init();
#endif
    xhmachdep->pcpu_init();
    xen_hyper_domain_init();
    xen_hyper_vcpu_init();
    xen_hyper_misc_init();
    /*
     * xen_hyper_post_init() have to be called after all initialize
     * functions finished.
     */
    xen_hyper_post_init();
}

/*
 * Do initialization for Domain of Xen Hyper system here.
 */
void
xen_hyper_domain_init(void)
{
	/*
	 * Cache offsets of the hypervisor "domain" struct members used by
	 * the domain subcommands.  Members absent in the target xen
	 * version simply remain invalid; callers must check them with
	 * XEN_HYPER_VALID_MEMBER() before use.
	 */
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_id, "domain", "domain_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_tot_pages, "domain", "tot_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_pages, "domain", "max_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_xenheap_pages, "domain", "xenheap_pages");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_shared_info, "domain", "shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_sched_priv, "domain", "sched_priv");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_next_in_list, "domain", "next_in_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_domain_flags, "domain", "domain_flags");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_evtchn, "domain", "evtchn");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_hvm, "domain", "is_hvm");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_guest_type, "domain", "guest_type");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_privileged, "domain", "is_privileged");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_debugger_attached, "domain", "debugger_attached");
	/*
	 * Will be removed in Xen 4.4 (hg ae9b223a675d),
	 * need to check that with XEN_HYPER_VALID_MEMBER() before using
	 */
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_polling, "domain", "is_polling");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_dying, "domain", "is_dying");
	/*
	 * With Xen 4.2.5 is_paused_by_controller changed to
	 * controller_pause_count.
	 */
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_paused_by_controller, "domain", "is_paused_by_controller");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_controller_pause_count, "domain", "controller_pause_count");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shutting_down, "domain", "is_shutting_down");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_is_shut_down, "domain", "is_shut_down");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_vcpu, "domain", "vcpu");
	XEN_HYPER_MEMBER_SIZE_INIT(domain_vcpu, "domain", "vcpu");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_max_vcpus, "domain", "max_vcpus");
	XEN_HYPER_MEMBER_OFFSET_INIT(domain_arch, "domain", "arch");

	/* Shared-info and arch-specific struct layouts. */
	XEN_HYPER_STRUCT_SIZE_INIT(arch_shared_info, "arch_shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_max_pfn, "arch_shared_info", "max_pfn");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_pfn_to_mfn_frame_list_list, "arch_shared_info", "pfn_to_mfn_frame_list_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_shared_info_nmi_reason, "arch_shared_info", "nmi_reason");
	XEN_HYPER_STRUCT_SIZE_INIT(shared_info, "shared_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_vcpu_info, "shared_info", "vcpu_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_pending, "shared_info", "evtchn_pending");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_evtchn_mask, "shared_info", "evtchn_mask");
	XEN_HYPER_MEMBER_OFFSET_INIT(shared_info_arch, "shared_info", "arch");
	XEN_HYPER_STRUCT_SIZE_INIT(arch_domain, "arch_domain");
#ifdef IA64
	XEN_HYPER_MEMBER_OFFSET_INIT(arch_domain_mm, "arch_domain", "mm");
	XEN_HYPER_STRUCT_SIZE_INIT(mm_struct, "mm_struct");
	XEN_HYPER_MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd");
#endif

	/* Space to hold one "domain" struct, plus a second verify copy. */
	if((xhdt->domain_struct = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
		error(FATAL, "cannot malloc domain struct space.\n");
	}
	if((xhdt->domain_struct_verify = malloc(XEN_HYPER_SIZE(domain))) == NULL) {
		error(FATAL, "cannot malloc domain struct space to verification.\n");
	}

	xen_hyper_refresh_domain_context_space();
	xhdt->flags |= XEN_HYPER_DOMAIN_F_INIT;
}

/*
 * Do
initialization for vcpu of Xen Hyper system here.
 */
void
xen_hyper_vcpu_init(void)
{
	/* Layout of the hypervisor "timer" struct. */
	XEN_HYPER_STRUCT_SIZE_INIT(timer, "timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_expires, "timer", "expires");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_cpu, "timer", "cpu");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_function, "timer", "function");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_data, "timer", "data");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_heap_offset, "timer", "heap_offset");
	XEN_HYPER_MEMBER_OFFSET_INIT(timer_killed, "timer", "killed");

	/* Layout of "vcpu_runstate_info". */
	XEN_HYPER_STRUCT_SIZE_INIT(vcpu_runstate_info, "vcpu_runstate_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state, "vcpu_runstate_info", "state");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_state_entry_time, "vcpu_runstate_info", "state_entry_time");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_info_time, "vcpu_runstate_info", "time");

	/* Layout of the "vcpu" struct itself. */
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_id, "vcpu", "vcpu_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_processor, "vcpu", "processor");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_info, "vcpu", "vcpu_info");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_domain, "vcpu", "domain");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_next_in_list, "vcpu", "next_in_list");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_timer, "vcpu", "timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sleep_tick, "vcpu", "sleep_tick");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_poll_timer, "vcpu", "poll_timer");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_sched_priv, "vcpu", "sched_priv");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate, "vcpu", "runstate");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_runstate_guest, "vcpu", "runstate_guest");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_flags, "vcpu", "vcpu_flags");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_pause_count, "vcpu", "pause_count");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_virq_to_evtchn, "vcpu", "virq_to_evtchn");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_cpu_affinity, "vcpu", "cpu_affinity");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_nmi_addr, "vcpu", "nmi_addr");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_vcpu_dirty_cpumask, "vcpu", "vcpu_dirty_cpumask");
	XEN_HYPER_MEMBER_OFFSET_INIT(vcpu_arch, "vcpu", "arch");
#ifdef IA64
	/* ia64: kernel stack pointer is nested inside vcpu.arch._thread. */
	XEN_HYPER_ASSIGN_OFFSET(vcpu_thread_ksp) =
		MEMBER_OFFSET("vcpu", "arch") +
		MEMBER_OFFSET("arch_vcpu", "_thread") +
		MEMBER_OFFSET("thread_struct", "ksp");
#endif

	/* Space to hold one "vcpu" struct, plus a second verify copy. */
	if((xhvct->vcpu_struct = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
		error(FATAL, "cannot malloc vcpu struct space.\n");
	}
	if((xhvct->vcpu_struct_verify = malloc(XEN_HYPER_SIZE(vcpu))) == NULL) {
		error(FATAL, "cannot malloc vcpu struct space to verification.\n");
	}

	xen_hyper_refresh_vcpu_context_space();
	xhvct->flags |= XEN_HYPER_VCPU_F_INIT;
	xhvct->idle_vcpu = symbol_value("idle_vcpu");
}

/*
 * Do initialization for pcpu of Xen Hyper system here.
 */
#if defined(X86) || defined(X86_64)
void
xen_hyper_x86_pcpu_init(void)
{
	ulong cpu_info;
	ulong init_tss_base, init_tss;
	ulong sp;
	struct xen_hyper_pcpu_context *pcc;
	char *buf, *bp;
	int i, cpuid;
	int flag;

	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_guest_cpu_user_regs, "cpu_info", "guest_cpu_user_regs");
	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_processor_id, "cpu_info", "processor_id");
	XEN_HYPER_MEMBER_OFFSET_INIT(cpu_info_current_vcpu, "cpu_info", "current_vcpu");

	if((xhpct->pcpu_struct = malloc(XEN_HYPER_SIZE(cpu_info))) == NULL) {
		error(FATAL, "cannot malloc pcpu struct space.\n");
	}
	/* get physical cpu context */
	xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS());
	/*
	 * init_tss is either a per-cpu symbol or a flat array depending
	 * on the xen version; flag records which form was found.
	 */
	if (symbol_exists("per_cpu__init_tss")) {
		init_tss_base = symbol_value("per_cpu__init_tss");
		flag = TRUE;
	} else {
		init_tss_base = symbol_value("init_tss");
		flag = FALSE;
	}
	buf = GETBUF(XEN_HYPER_SIZE(tss_struct));
	for_cpu_indexes(i, cpuid) {
		if (flag)
			init_tss = xen_hyper_per_cpu(init_tss_base, cpuid);
		else
			init_tss = init_tss_base +
				XEN_HYPER_SIZE(tss_struct) * cpuid;
		if (!readmem(init_tss, KVADDR, buf,
			XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read init_tss.\n");
		}
		/* The ring-0 stack pointer locates the cpu_info block. */
		if (machine_type("X86")) {
			sp = ULONG(buf +
XEN_HYPER_OFFSET(tss_struct_esp0)); } else if (machine_type("X86_64")) { sp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_rsp0)); } else sp = 0; cpu_info = XEN_HYPER_GET_CPU_INFO(sp); if (CRASHDEBUG(1)) { fprintf(fp, "sp=%lx, cpu_info=%lx\n", sp, cpu_info); } if(!(bp = xen_hyper_read_pcpu(cpu_info))) { error(FATAL, "cannot read cpu_info.\n"); } pcc = &xhpct->context_array[cpuid]; xen_hyper_store_pcpu_context(pcc, cpu_info, bp); xen_hyper_store_pcpu_context_tss(pcc, init_tss, buf); } FREEBUF(buf); } #elif defined(IA64) void xen_hyper_ia64_pcpu_init(void) { struct xen_hyper_pcpu_context *pcc; int i, cpuid; /* get physical cpu context */ xen_hyper_alloc_pcpu_context_space(XEN_HYPER_MAX_CPUS()); for_cpu_indexes(i, cpuid) { pcc = &xhpct->context_array[cpuid]; pcc->processor_id = cpuid; } } #endif /* * Do initialization for some miscellaneous thing * of Xen Hyper system here. */ void xen_hyper_misc_init(void) { XEN_HYPER_STRUCT_SIZE_INIT(schedule_data, "schedule_data"); XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_schedule_lock, "schedule_data", "schedule_lock"); XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_curr, "schedule_data", "curr"); if (MEMBER_EXISTS("schedule_data", "idle")) XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_idle, "schedule_data", "idle"); XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_sched_priv, "schedule_data", "sched_priv"); XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_s_timer, "schedule_data", "s_timer"); XEN_HYPER_MEMBER_OFFSET_INIT(schedule_data_tick, "schedule_data", "tick"); XEN_HYPER_STRUCT_SIZE_INIT(scheduler, "scheduler"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_name, "scheduler", "name"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_opt_name, "scheduler", "opt_name"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sched_id, "scheduler", "sched_id"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init, "scheduler", "init"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_tick, "scheduler", "tick"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_init_vcpu, "scheduler", "init_vcpu"); 
XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_destroy_domain, "scheduler", "destroy_domain"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_sleep, "scheduler", "sleep"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_wake, "scheduler", "wake"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_set_affinity, "scheduler", "set_affinity"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_do_schedule, "scheduler", "do_schedule"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_adjust, "scheduler", "adjust"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_settings, "scheduler", "dump_settings"); XEN_HYPER_MEMBER_OFFSET_INIT(scheduler_dump_cpu_state, "scheduler", "dump_cpu_state"); xen_hyper_schedule_init(); } /* * Do initialization for scheduler of Xen Hyper system here. */ #define XEN_HYPER_SCHEDULERS_ARRAY_CNT 10 #define XEN_HYPER_SCHEDULER_NAME 1024 static void xen_hyper_schedule_init(void) { ulong addr, opt_sched, schedulers, opt_name; long scheduler_opt_name; long schedulers_buf[XEN_HYPER_SCHEDULERS_ARRAY_CNT]; struct xen_hyper_sched_context *schc; char *buf; char opt_name_buf[XEN_HYPER_OPT_SCHED_SIZE]; int i, cpuid, flag; /* get scheduler information */ if((xhscht->scheduler_struct = malloc(XEN_HYPER_SIZE(scheduler))) == NULL) { error(FATAL, "cannot malloc scheduler struct space.\n"); } buf = GETBUF(XEN_HYPER_SCHEDULER_NAME); scheduler_opt_name = XEN_HYPER_OFFSET(scheduler_opt_name); if (symbol_exists("ops")) { if (!readmem(symbol_value("ops") + scheduler_opt_name, KVADDR, &opt_sched, sizeof(ulong), "ops.opt_name", RETURN_ON_ERROR)) { error(FATAL, "cannot read ops.opt_name.\n"); } } else { opt_sched = symbol_value("opt_sched"); } if (!readmem(opt_sched, KVADDR, xhscht->opt_sched, XEN_HYPER_OPT_SCHED_SIZE, "opt_sched,", RETURN_ON_ERROR)) { error(FATAL, "cannot read opt_sched,.\n"); } schedulers = symbol_value("schedulers"); addr = schedulers; while (xhscht->name == NULL) { if (!readmem(addr, KVADDR, schedulers_buf, sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT, "schedulers", RETURN_ON_ERROR)) { error(FATAL, "cannot 
read schedulers.\n"); } for (i = 0; i < XEN_HYPER_SCHEDULERS_ARRAY_CNT; i++) { if (schedulers_buf[i] == 0) { error(FATAL, "schedule data not found.\n"); } if (!readmem(schedulers_buf[i], KVADDR, xhscht->scheduler_struct, XEN_HYPER_SIZE(scheduler), "scheduler", RETURN_ON_ERROR)) { error(FATAL, "cannot read scheduler.\n"); } opt_name = ULONG(xhscht->scheduler_struct + scheduler_opt_name); if (!readmem(opt_name, KVADDR, opt_name_buf, XEN_HYPER_OPT_SCHED_SIZE, "opt_name", RETURN_ON_ERROR)) { error(FATAL, "cannot read opt_name.\n"); } if (strncmp(xhscht->opt_sched, opt_name_buf, XEN_HYPER_OPT_SCHED_SIZE)) continue; xhscht->scheduler = schedulers_buf[i]; xhscht->sched_id = INT(xhscht->scheduler_struct + XEN_HYPER_OFFSET(scheduler_sched_id)); addr = ULONG(xhscht->scheduler_struct + XEN_HYPER_OFFSET(scheduler_name)); if (!readmem(addr, KVADDR, buf, XEN_HYPER_SCHEDULER_NAME, "scheduler_name", RETURN_ON_ERROR)) { error(FATAL, "cannot read scheduler_name.\n"); } if (strlen(buf) >= XEN_HYPER_SCHEDULER_NAME) { error(FATAL, "cannot read scheduler_name.\n"); } if((xhscht->name = malloc(strlen(buf) + 1)) == NULL) { error(FATAL, "cannot malloc scheduler_name space.\n"); } BZERO(xhscht->name, strlen(buf) + 1); strncpy(xhscht->name, buf, strlen(buf)); break; } addr += sizeof(long) * XEN_HYPER_SCHEDULERS_ARRAY_CNT; } FREEBUF(buf); /* get schedule_data information */ if((xhscht->sched_context_array = malloc(sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS())) == NULL) { error(FATAL, "cannot malloc xen_hyper_sched_context struct space.\n"); } BZERO(xhscht->sched_context_array, sizeof(struct xen_hyper_sched_context) * XEN_HYPER_MAX_CPUS()); buf = GETBUF(XEN_HYPER_SIZE(schedule_data)); if (symbol_exists("per_cpu__schedule_data")) { addr = symbol_value("per_cpu__schedule_data"); flag = TRUE; } else { addr = symbol_value("schedule_data"); flag = FALSE; } for_cpu_indexes(i, cpuid) { schc = &xhscht->sched_context_array[cpuid]; if (flag) { schc->schedule_data = 
xen_hyper_per_cpu(addr, i); } else { schc->schedule_data = addr + XEN_HYPER_SIZE(schedule_data) * i; } if (!readmem(schc->schedule_data, KVADDR, buf, XEN_HYPER_SIZE(schedule_data), "schedule_data", RETURN_ON_ERROR)) { error(FATAL, "cannot read schedule_data.\n"); } schc->cpu_id = cpuid; schc->curr = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_curr)); if (MEMBER_EXISTS("schedule_data", "idle")) schc->idle = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_idle)); else schc->idle = xht->idle_vcpu_array[cpuid]; schc->sched_priv = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_sched_priv)); if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) schc->tick = ULONG(buf + XEN_HYPER_OFFSET(schedule_data_tick)); } FREEBUF(buf); } /* * This should be called after all initailize process finished. */ void xen_hyper_post_init(void) { struct xen_hyper_pcpu_context *pcc; int i, cpuid; /* set current vcpu to pcpu context */ for_cpu_indexes(i, cpuid) { pcc = &xhpct->context_array[cpuid]; if (!pcc->current_vcpu) { pcc->current_vcpu = xen_hyper_get_active_vcpu_from_pcpuid(cpuid); } } /* set pcpu last */ if (!(xhpct->last = xen_hyper_id_to_pcpu_context(XEN_HYPER_CRASHING_CPU()))) { xhpct->last = &xhpct->context_array[xht->cpu_idxs[0]]; } /* set vcpu last */ if (xhpct->last) { xhvct->last = xen_hyper_vcpu_to_vcpu_context(xhpct->last->current_vcpu); /* set crashing vcpu */ xht->crashing_vcc = xhvct->last; } if (!xhvct->last) { xhvct->last = xhvct->vcpu_context_arrays->context_array; } /* set domain last */ if (xhvct->last) { xhdt->last = xen_hyper_domain_to_domain_context(xhvct->last->domain); } if (!xhdt->last) { xhdt->last = xhdt->context_array; } } /* * Do initialization for dump information here. */ void xen_hyper_dumpinfo_init(void) { Elf32_Nhdr *note; char *buf, *bp, *np, *upp; char *nccp, *xccp; ulong addr; long size; int i, cpuid, samp_cpuid; /* * NOTE kakuma: It is not clear that what kind of * a elf note format each one of the xen uses. * So, we decide it confirming whether a symbol exists. 
*/ if (STRUCT_EXISTS("note_buf_t")) xhdit->note_ver = XEN_HYPER_ELF_NOTE_V1; else if (STRUCT_EXISTS("crash_note_xen_t")) xhdit->note_ver = XEN_HYPER_ELF_NOTE_V2; else if (STRUCT_EXISTS("crash_xen_core_t")) { if (STRUCT_EXISTS("crash_note_xen_core_t")) xhdit->note_ver = XEN_HYPER_ELF_NOTE_V3; else xhdit->note_ver = XEN_HYPER_ELF_NOTE_V4; } else { error(WARNING, "found unsupported elf note format while checking of xen dumpinfo.\n"); return; } if (!xen_hyper_test_pcpu_id(XEN_HYPER_CRASHING_CPU())) { error(WARNING, "crashing_cpu not found.\n"); return; } /* allocate a context area */ size = sizeof(struct xen_hyper_dumpinfo_context) * machdep->get_smp_cpus(); if((xhdit->context_array = malloc(size)) == NULL) { error(FATAL, "cannot malloc dumpinfo table context space.\n"); } BZERO(xhdit->context_array, size); size = sizeof(struct xen_hyper_dumpinfo_context_xen_core) * machdep->get_smp_cpus(); if((xhdit->context_xen_core_array = malloc(size)) == NULL) { error(FATAL, "cannot malloc dumpinfo table context_xen_core_array space.\n"); } BZERO(xhdit->context_xen_core_array, size); if (symbol_exists("per_cpu__crash_notes")) addr = symbol_value("per_cpu__crash_notes"); else get_symbol_data("crash_notes", sizeof(ulong), &addr); for (i = 0; i < machdep->get_smp_cpus(); i++) { ulong addr_notes; if (symbol_exists("per_cpu__crash_notes")) addr_notes = xen_hyper_per_cpu(addr, i); else addr_notes = addr + i * STRUCT_SIZE("crash_note_range_t") + MEMBER_OFFSET("crash_note_range_t", "start"); if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) { if (!readmem(addr_notes, KVADDR, &(xhdit->context_array[i].note), sizeof(ulong), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes.\n"); return; } } else { xhdit->context_array[i].note = addr_notes; } } if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V1) { xhdit->note_size = XEN_HYPER_SIZE(note_buf_t); } else if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) { xhdit->note_size = XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE; } else { xhdit->note_size = 
XEN_HYPER_SIZE(crash_note_t); } /* read a sample note */ buf = GETBUF(xhdit->note_size); if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V4) samp_cpuid = xht->cpu_idxs[0]; else samp_cpuid = XEN_HYPER_CRASHING_CPU(); xhdit->xen_info_cpu = samp_cpuid; if (!xen_hyper_fill_elf_notes(xhdit->context_array[samp_cpuid].note, buf, XEN_HYPER_ELF_NOTE_FILL_T_NOTE)) { error(FATAL, "cannot read crash_notes.\n"); } bp = buf; /* Get elf format information for each version. */ switch (xhdit->note_ver) { case XEN_HYPER_ELF_NOTE_V1: /* core data */ note = (Elf32_Nhdr *)bp; np = bp + sizeof(Elf32_Nhdr); upp = np + note->n_namesz; upp = (char *)roundup((ulong)upp, 4); xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note); note = (Elf32_Nhdr *)(upp + note->n_descsz); /* cr3 data */ np = (char *)note + sizeof(Elf32_Nhdr); upp = np + note->n_namesz; upp = (char *)roundup((ulong)upp, 4); upp = upp + note->n_descsz; xhdit->core_size = upp - bp; break; case XEN_HYPER_ELF_NOTE_V2: /* core data */ xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc); xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t); /* xen core */ xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_t_desc); xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_t); break; case XEN_HYPER_ELF_NOTE_V3: /* core data */ xhdit->core_offset = XEN_HYPER_OFFSET(crash_note_core_t_desc); xhdit->core_size = XEN_HYPER_SIZE(crash_note_core_t); /* xen core */ xhdit->xen_core_offset = XEN_HYPER_OFFSET(crash_note_xen_core_t_desc); xhdit->xen_core_size = XEN_HYPER_SIZE(crash_note_xen_core_t); /* xen info */ xhdit->xen_info_offset = XEN_HYPER_OFFSET(crash_note_xen_info_t_desc); xhdit->xen_info_size = XEN_HYPER_SIZE(crash_note_xen_info_t); break; case XEN_HYPER_ELF_NOTE_V4: /* core data */ note = (Elf32_Nhdr *)bp; np = bp + sizeof(Elf32_Nhdr); upp = np + note->n_namesz; upp = (char *)roundup((ulong)upp, 4); xhdit->core_offset = (Elf_Word)((ulong)upp - (ulong)note); upp = upp + note->n_descsz; xhdit->core_size = 
(Elf_Word)((ulong)upp - (ulong)note); if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + 32) { error(WARNING, "note size is assumed on crash is incorrect.(core data)\n"); return; } /* xen core */ note = (Elf32_Nhdr *)upp; np = (char *)note + sizeof(Elf32_Nhdr); upp = np + note->n_namesz; upp = (char *)roundup((ulong)upp, 4); xhdit->xen_core_offset = (Elf_Word)((ulong)upp - (ulong)note); upp = upp + note->n_descsz; xhdit->xen_core_size = (Elf_Word)((ulong)upp - (ulong)note); if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + xhdit->xen_core_size + 32) { error(WARNING, "note size is assumed on crash is incorrect.(xen core)\n"); return; } /* xen info */ note = (Elf32_Nhdr *)upp; np = (char *)note + sizeof(Elf32_Nhdr); upp = np + note->n_namesz; upp = (char *)roundup((ulong)upp, 4); xhdit->xen_info_offset = (Elf_Word)((ulong)upp - (ulong)note); upp = upp + note->n_descsz; xhdit->xen_info_size = (Elf_Word)((ulong)upp - (ulong)note); if (XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE < xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size) { error(WARNING, "note size is assumed on crash is incorrect.(xen info)\n"); return; } xhdit->note_size = xhdit->core_size + xhdit->xen_core_size + xhdit->xen_info_size; break; default: error(FATAL, "logic error in cheking elf note format occurs.\n"); } /* fill xen info context. 
*/ if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) { if((xhdit->crash_note_xen_info_ptr = malloc(xhdit->xen_info_size)) == NULL) { error(FATAL, "cannot malloc dumpinfo table " "crash_note_xen_info_ptr space.\n"); } memcpy(xhdit->crash_note_xen_info_ptr, bp + xhdit->core_size + xhdit->xen_core_size, xhdit->xen_info_size); xhdit->context_xen_info.note = xhdit->context_array[samp_cpuid].note + xhdit->core_size + xhdit->xen_core_size; xhdit->context_xen_info.pcpu_id = samp_cpuid; xhdit->context_xen_info.crash_xen_info_ptr = xhdit->crash_note_xen_info_ptr + xhdit->xen_info_offset; } /* allocate note core */ size = xhdit->core_size * XEN_HYPER_NR_PCPUS(); if(!(xhdit->crash_note_core_array = malloc(size))) { error(FATAL, "cannot malloc crash_note_core_array space.\n"); } nccp = xhdit->crash_note_core_array; BZERO(nccp, size); xccp = NULL; /* allocate xen core */ if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) { size = xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(); if(!(xhdit->crash_note_xen_core_array = malloc(size))) { error(FATAL, "cannot malloc dumpinfo table " "crash_note_xen_core_array space.\n"); } xccp = xhdit->crash_note_xen_core_array; BZERO(xccp, size); } /* fill a context. */ for_cpu_indexes(i, cpuid) { /* fill core context. */ addr = xhdit->context_array[cpuid].note; if (!xen_hyper_fill_elf_notes(addr, nccp, XEN_HYPER_ELF_NOTE_FILL_T_CORE)) { error(FATAL, "cannot read elf note core.\n"); } xhdit->context_array[cpuid].pcpu_id = cpuid; xhdit->context_array[cpuid].ELF_Prstatus_ptr = nccp + xhdit->core_offset; xhdit->context_array[cpuid].pr_reg_ptr = nccp + xhdit->core_offset + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg); /* Is there xen core data? */ if (xhdit->note_ver < XEN_HYPER_ELF_NOTE_V2) { nccp += xhdit->core_size; continue; } if (xhdit->note_ver == XEN_HYPER_ELF_NOTE_V2 && cpuid != samp_cpuid) { xccp += xhdit->xen_core_size; nccp += xhdit->core_size; continue; } /* fill xen core context, in case of more elf note V2. 
*/ xhdit->context_xen_core_array[cpuid].note = xhdit->context_array[cpuid].note + xhdit->core_size; xhdit->context_xen_core_array[cpuid].pcpu_id = cpuid; xhdit->context_xen_core_array[cpuid].crash_xen_core_ptr = xccp + xhdit->xen_core_offset; if (!xen_hyper_fill_elf_notes(xhdit->context_xen_core_array[cpuid].note, xccp, XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE)) { error(FATAL, "cannot read elf note xen core.\n"); } xccp += xhdit->xen_core_size; nccp += xhdit->core_size; } FREEBUF(buf); } /* * Get dump information context from physical cpu id. */ struct xen_hyper_dumpinfo_context * xen_hyper_id_to_dumpinfo_context(uint id) { if (!xen_hyper_test_pcpu_id(id)) return NULL; return &xhdit->context_array[id]; } /* * Get dump information context from ELF Note address. */ struct xen_hyper_dumpinfo_context * xen_hyper_note_to_dumpinfo_context(ulong note) { int i; for (i = 0; i < XEN_HYPER_MAX_CPUS(); i++) { if (note == xhdit->context_array[i].note) { return &xhdit->context_array[i]; } } return NULL; } /* * Fill ELF Notes header here. * This assume that variable note has a top address of an area for * specified type. 
*/ char * xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type) { long size; ulong rp = note; if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) size = xhdit->note_size; else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) size = xhdit->core_size; else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) size = xhdit->xen_core_size; else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) size = xhdit->core_size + xhdit->xen_core_size; else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) size = XEN_HYPER_SIZE(ELF_Prstatus); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) size = XEN_HYPER_SIZE(xen_crash_xen_regs_t); else return NULL; if (!readmem(rp, KVADDR, note_buf, size, "note_buf_t or crash_note_t", RETURN_ON_ERROR)) { if (type == XEN_HYPER_ELF_NOTE_FILL_T_NOTE) error(WARNING, "cannot fill note_buf_t or crash_note_t.\n"); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_CORE) error(WARNING, "cannot fill note core.\n"); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE) error(WARNING, "cannot fill note xen core.\n"); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_CORE_M) error(WARNING, "cannot fill note core & xen core.\n"); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_PRS) error(WARNING, "cannot fill ELF_Prstatus.\n"); else if (type == XEN_HYPER_ELF_NOTE_FILL_T_XEN_REGS) error(WARNING, "cannot fill xen_crash_xen_regs_t.\n"); return NULL; } return note_buf; } /* * Get domain status. */ ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc) { if (ACTIVE()) { if (xen_hyper_read_domain_verify(dc->domain) == NULL) { return XEN_HYPER_DOMF_ERROR; } } return dc->domain_flags; } /* * Allocate domain context space. 
 */
void
xen_hyper_refresh_domain_context_space(void)
{
	char *domain_struct;
	ulong domain, next, dom_xen, dom_io, idle_vcpu;
	struct xen_hyper_domain_context *dc;
	struct xen_hyper_domain_context *dom0;
	int i;

	/* For a dump file the table only needs to be built once. */
	if ((xhdt->flags & XEN_HYPER_DOMAIN_F_INIT) && !ACTIVE()) {
		return;
	}

	XEN_HYPER_RUNNING_DOMAINS() = XEN_HYPER_NR_DOMAINS() =
		xen_hyper_get_domains();
	xen_hyper_alloc_domain_context_space(XEN_HYPER_NR_DOMAINS());
	dc = xhdt->context_array;

	/* restore an dom_io context. */
	get_symbol_data("dom_io", sizeof(dom_io), &dom_io);
	if ((domain_struct = xen_hyper_read_domain(dom_io)) == NULL) {
		error(FATAL, "cannot read dom_io.\n");
	}
	xen_hyper_store_domain_context(dc, dom_io, domain_struct);
	xhdt->dom_io = dc;
	dc++;

	/* restore an dom_xen context. */
	get_symbol_data("dom_xen", sizeof(dom_xen), &dom_xen);
	if ((domain_struct = xen_hyper_read_domain(dom_xen)) == NULL) {
		error(FATAL, "cannot read dom_xen.\n");
	}
	xen_hyper_store_domain_context(dc, dom_xen, domain_struct);
	xhdt->dom_xen = dc;
	dc++;

	/* restore an idle domain context: one per MAX_VIRT_CPUS stride
	   of the idle_vcpu[] array. */
	for (i = 0; i < xht->idle_vcpu_size; i += XEN_HYPER_MAX_VIRT_CPUS) {
		idle_vcpu = xht->idle_vcpu_array[i];
		if (idle_vcpu == 0)
			break;
		if (!readmem(idle_vcpu + MEMBER_OFFSET("vcpu", "domain"),
			KVADDR, &domain, sizeof(domain), "domain",
			RETURN_ON_ERROR)) {
			error(FATAL, "cannot read domain member in vcpu.\n");
		}
		if (CRASHDEBUG(1)) {
			fprintf(fp, "idle_vcpu=%lx, domain=%lx\n",
				idle_vcpu, domain);
		}
		if ((domain_struct = xen_hyper_read_domain(domain)) == NULL) {
			error(FATAL, "cannot read idle domain.\n");
		}
		xen_hyper_store_domain_context(dc, domain, domain_struct);
		if (i == 0)
			xhdt->idle_domain = dc;
		dc++;
	}

	/* restore domain contexts from dom0 symbol. */
	xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_DOM0, &next);
	domain = next;
	dom0 = dc;
	while((domain_struct =
		xen_hyper_get_domain_next(XEN_HYPER_DOMAIN_READ_NEXT,
		&next)) != NULL) {
		xen_hyper_store_domain_context(dc, domain, domain_struct);
		domain = next;
		dc++;
	}
	xhdt->dom0 = dom0;
}

/*
 * Get number of domain.
 */
int
xen_hyper_get_domains(void)
{
	ulong domain, next_in_list;
	long domain_next_in_list;
	int i, j;

	/* "dom0" was renamed "hardware_domain" in later xen versions. */
	if (!try_get_symbol_data("dom0", sizeof(void *), &domain))
		get_symbol_data("hardware_domain", sizeof(void *), &domain);

	domain_next_in_list = MEMBER_OFFSET("domain", "next_in_list");
	i = 0;
	/* walk the domain.next_in_list chain, counting entries */
	while (domain != 0) {
		i++;
		next_in_list = domain + domain_next_in_list;
		if (!readmem(next_in_list, KVADDR, &domain, sizeof(void *),
			"domain.next_in_list", RETURN_ON_ERROR)) {
			error(FATAL, "cannot read domain.next_in_list.\n");
		}
	}
	i += 2;		/* for dom_io, dom_xen */
	/* for idle domains */
	for (j = 0; j < xht->idle_vcpu_size; j += XEN_HYPER_MAX_VIRT_CPUS) {
		if (xht->idle_vcpu_array[j])
			i++;
	}
	return i;
}

/*
 * Get next domain struct.
 * mod - XEN_HYPER_DOMAIN_READ_DOM0:start from dom0 symbol
 *     - XEN_HYPER_DOMAIN_READ_INIT:start from xhdt->context_array
 *     - XEN_HYPER_DOMAIN_READ_NEXT:next
 */
char *
xen_hyper_get_domain_next(int mod, ulong *next)
{
	static int idx = 0;	/* iteration cursor; persists across calls */
	char *domain_struct;
	struct xen_hyper_domain_context *dc;

	switch (mod) {
	case XEN_HYPER_DOMAIN_READ_DOM0:
		/* Case of search from dom0 symbol. */
		idx = 0;
		if (xhdt->dom0) {
			*next = xhdt->dom0->domain;
		} else {
			if (!try_get_symbol_data("dom0", sizeof(void *), next))
				get_symbol_data("hardware_domain",
					sizeof(void *), next);
		}
		return xhdt->domain_struct;
		break;
	case XEN_HYPER_DOMAIN_READ_INIT:
		/* Case of search from context_array. */
		if (xhdt->context_array && xhdt->context_array->domain) {
			idx = 1;	/* this has a next index. */
			*next = xhdt->context_array->domain;
		} else {
			idx = 0;
			*next = 0;
			return NULL;
		}
		return xhdt->domain_struct;
		break;
	case XEN_HYPER_DOMAIN_READ_NEXT:
		break;
	default :
		error(FATAL, "xen_hyper_get_domain_next mod error: %d\n", mod);
		return NULL;
	}

	/* Finished search */
	if (!*next) {
		return NULL;
	}
	domain_struct = NULL;
	/* Is domain context array valid? */
	if (idx) {
		if ((domain_struct =
			xen_hyper_read_domain(*next)) == NULL) {
			error(FATAL, "cannot get next domain from domain context array.\n");
		}
		if (idx > XEN_HYPER_NR_DOMAINS()) {
			*next = 0;
		} else {
			dc = xhdt->context_array;
			dc += idx;
			*next = dc->domain;
			idx++;
		}
		return domain_struct;
	}

	/* Search from dom0 symbol. */
	if ((domain_struct = xen_hyper_read_domain(*next)) == NULL) {
		error(FATAL, "cannot get next domain from dom0 symbol.\n");
	}
	*next = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_next_in_list));
	return domain_struct;
}

/*
 * from domain address to id.
 */
domid_t
xen_hyper_domain_to_id(ulong domain)
{
	struct xen_hyper_domain_context *dc;

	/* Is domain context array valid? */
	if (xhdt->context_array && xhdt->context_array->domain) {
		if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
			return XEN_HYPER_DOMAIN_ID_INVALID;
		} else {
			return dc->domain_id;
		}
	} else {
		return XEN_HYPER_DOMAIN_ID_INVALID;
	}
}

/*
 * Get domain struct from id.
 */
char *
xen_hyper_id_to_domain_struct(domid_t id)
{
	char *domain_struct;
	struct xen_hyper_domain_context *dc;

	domain_struct = NULL;

	/* Is domain context array valid? */
	if (xhdt->context_array && xhdt->context_array->domain) {
		if ((dc = xen_hyper_id_to_domain_context(id)) == NULL) {
			return NULL;
		} else {
			if ((domain_struct =
				xen_hyper_read_domain(dc->domain)) == NULL) {
				error(FATAL, "cannot get domain from domain context array with id.\n");
			}
			return domain_struct;
		}
	} else {
		return NULL;
	}
}

/*
 * Get domain context from domain address.
*/ struct xen_hyper_domain_context * xen_hyper_domain_to_domain_context(ulong domain) { struct xen_hyper_domain_context *dc; int i; if (xhdt->context_array == NULL || xhdt->context_array->domain == 0) { return NULL; } if (!domain) { return NULL; } for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); i++, dc++) { if (domain == dc->domain) { return dc; } } return NULL; } /* * Get domain context from domain id. */ struct xen_hyper_domain_context * xen_hyper_id_to_domain_context(domid_t id) { struct xen_hyper_domain_context *dc; int i; if (xhdt->context_array == NULL || xhdt->context_array->domain == 0) { return NULL; } if (id == XEN_HYPER_DOMAIN_ID_INVALID) { return NULL; } for (i = 0, dc = xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); i++, dc++) { if (id == dc->domain_id) { return dc; } } return NULL; } /* * Store domain struct contents. */ struct xen_hyper_domain_context * xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, ulong domain, char *dp) { char *vcpup; unsigned int max_vcpus; unsigned int i; dc->domain = domain; BCOPY((char *)(dp + XEN_HYPER_OFFSET(domain_domain_id)), &dc->domain_id, sizeof(domid_t)); dc->tot_pages = UINT(dp + XEN_HYPER_OFFSET(domain_tot_pages)); dc->max_pages = UINT(dp + XEN_HYPER_OFFSET(domain_max_pages)); dc->xenheap_pages = UINT(dp + XEN_HYPER_OFFSET(domain_xenheap_pages)); dc->shared_info = ULONG(dp + XEN_HYPER_OFFSET(domain_shared_info)); dc->sched_priv = ULONG(dp + XEN_HYPER_OFFSET(domain_sched_priv)); dc->next_in_list = ULONG(dp + XEN_HYPER_OFFSET(domain_next_in_list)); if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) dc->domain_flags = ULONG(dp + XEN_HYPER_OFFSET(domain_domain_flags)); else if (XEN_HYPER_VALID_MEMBER(domain_is_shut_down)) { dc->domain_flags = 0; if (XEN_HYPER_VALID_MEMBER(domain_is_hvm) && *(dp + XEN_HYPER_OFFSET(domain_is_hvm))) { dc->domain_flags |= XEN_HYPER_DOMS_HVM; } if (XEN_HYPER_VALID_MEMBER(domain_guest_type) && *(dp + XEN_HYPER_OFFSET(domain_guest_type))) { /* For now PVH 
and HVM are the same for crash. * and 0 is PV. */ dc->domain_flags |= XEN_HYPER_DOMS_HVM; } if (*(dp + XEN_HYPER_OFFSET(domain_is_privileged))) { dc->domain_flags |= XEN_HYPER_DOMS_privileged; } if (*(dp + XEN_HYPER_OFFSET(domain_debugger_attached))) { dc->domain_flags |= XEN_HYPER_DOMS_debugging; } if (XEN_HYPER_VALID_MEMBER(domain_is_polling) && *(dp + XEN_HYPER_OFFSET(domain_is_polling))) { dc->domain_flags |= XEN_HYPER_DOMS_polling; } if (XEN_HYPER_VALID_MEMBER(domain_is_paused_by_controller) && *(dp + XEN_HYPER_OFFSET(domain_is_paused_by_controller))) { dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; } if (XEN_HYPER_VALID_MEMBER(domain_controller_pause_count) && *(dp + XEN_HYPER_OFFSET(domain_controller_pause_count))) { dc->domain_flags |= XEN_HYPER_DOMS_ctrl_pause; } if (*(dp + XEN_HYPER_OFFSET(domain_is_dying))) { dc->domain_flags |= XEN_HYPER_DOMS_dying; } if (*(dp + XEN_HYPER_OFFSET(domain_is_shutting_down))) { dc->domain_flags |= XEN_HYPER_DOMS_shuttingdown; } if (*(dp + XEN_HYPER_OFFSET(domain_is_shut_down))) { dc->domain_flags |= XEN_HYPER_DOMS_shutdown; } } else { dc->domain_flags = XEN_HYPER_DOMF_ERROR; } dc->evtchn = ULONG(dp + XEN_HYPER_OFFSET(domain_evtchn)); if (XEN_HYPER_VALID_MEMBER(domain_max_vcpus)) { max_vcpus = UINT(dp + XEN_HYPER_OFFSET(domain_max_vcpus)); } else if (XEN_HYPER_VALID_SIZE(domain_vcpu)) { max_vcpus = XEN_HYPER_SIZE(domain_vcpu) / sizeof(void *); } else { max_vcpus = XEN_HYPER_MAX_VIRT_CPUS; } if (!(dc->vcpu = malloc(sizeof(ulong) * max_vcpus))) { error(FATAL, "cannot malloc vcpu array (%d VCPUs).", max_vcpus); } if (MEMBER_TYPE("domain", "vcpu") == TYPE_CODE_ARRAY) vcpup = dp + XEN_HYPER_OFFSET(domain_vcpu); else { ulong vcpu_array = ULONG(dp + XEN_HYPER_OFFSET(domain_vcpu)); if (vcpu_array && max_vcpus) { if (!(vcpup = malloc(max_vcpus * sizeof(void *)))) { error(FATAL, "cannot malloc VCPU array for domain %lx.", domain); } if (!readmem(vcpu_array, KVADDR, vcpup, max_vcpus * sizeof(void*), "VCPU array", RETURN_ON_ERROR)) { 
error(FATAL, "cannot read VCPU array for domain %lx.", domain); } } else { vcpup = NULL; } } if (vcpup) { for (i = 0; i < max_vcpus; i++) { dc->vcpu[i] = ULONG(vcpup + i*sizeof(void *)); if (dc->vcpu[i]) XEN_HYPER_NR_VCPUS_IN_DOM(dc)++; } if (vcpup != dp + XEN_HYPER_OFFSET(domain_vcpu)) { free(vcpup); } } return dc; } /* * Read domain struct from domain context. */ char * xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc) { return xen_hyper_fill_domain_struct(dc->domain, xhdt->domain_struct); } /* * Read domain struct. */ char * xen_hyper_read_domain(ulong domain) { return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct); } /* * Read domain struct to verification. */ char * xen_hyper_read_domain_verify(ulong domain) { return xen_hyper_fill_domain_struct(domain, xhdt->domain_struct_verify); } /* * Fill domain struct. */ char * xen_hyper_fill_domain_struct(ulong domain, char *domain_struct) { if (!readmem(domain, KVADDR, domain_struct, XEN_HYPER_SIZE(domain), "fill_domain_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { error(WARNING, "cannot fill domain struct.\n"); return NULL; } return domain_struct; } /* * Allocate domain context space. 
*/ void xen_hyper_alloc_domain_context_space(int domains) { if (xhdt->context_array == NULL) { if (!(xhdt->context_array = malloc(domains * sizeof(struct xen_hyper_domain_context)))) { error(FATAL, "cannot malloc context array (%d domains).", domains); } xhdt->context_array_cnt = domains; } else if (domains > xhdt->context_array_cnt) { struct xen_hyper_domain_context *dc; int i; for (dc = xhdt->context_array, i = 0; i < xhdt->context_array_cnt; ++dc, ++i) { if (dc->vcpu) free(dc->vcpu); } if (!(xhdt->context_array = realloc(xhdt->context_array, domains * sizeof(struct xen_hyper_domain_context)))) { error(FATAL, "cannot realloc context array (%d domains).", domains); } xhdt->context_array_cnt = domains; } BZERO(xhdt->context_array, domains * sizeof(struct xen_hyper_domain_context)); } /* * Get vcpu status. */ int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc) { if (ACTIVE()) { if (xen_hyper_read_vcpu_verify(vcc->vcpu) == NULL) { return XEN_HYPER_RUNSTATE_ERROR; } } return vcc->state; } /* * Allocate vcpu context space. */ void xen_hyper_refresh_vcpu_context_space(void) { struct xen_hyper_domain_context *dc; struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vcc; int i, j; if ((xhvct->flags & XEN_HYPER_VCPU_F_INIT) && !ACTIVE()) { return; } xen_hyper_alloc_vcpu_context_arrays_space(XEN_HYPER_NR_DOMAINS()); for (i = 0, xht->vcpus = 0, dc = xhdt->context_array, vcca = xhvct->vcpu_context_arrays; i < XEN_HYPER_NR_DOMAINS(); i++, dc++, vcca++) { dc->vcpu_context_array = vcca; xen_hyper_alloc_vcpu_context_space(vcca, XEN_HYPER_NR_VCPUS_IN_DOM(dc)); for (j = 0, vcc = vcca->context_array; j < XEN_HYPER_NR_VCPUS_IN_DOM(dc); j++, vcc++) { xen_hyper_read_vcpu(dc->vcpu[j]); xen_hyper_store_vcpu_context(vcc, dc->vcpu[j], xhvct->vcpu_struct); } if (dc == xhdt->idle_domain) { xhvct->idle_vcpu_context_array = vcca; } xht->vcpus += vcca->context_array_cnt; } } /* * Get vcpu context from vcpu address. 
*/ struct xen_hyper_vcpu_context * xen_hyper_vcpu_to_vcpu_context(ulong vcpu) { struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vcc; int i, j; if (!vcpu) { return NULL; } for (i = 0, vcca = xhvct->vcpu_context_arrays; i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { for (j = 0, vcc = vcca->context_array; j < vcca->context_array_cnt; j++, vcc++) { if (vcpu == vcc->vcpu) { return vcc; } } } return NULL; } /* * Get vcpu context. */ struct xen_hyper_vcpu_context * xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid) { struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vcc; int i; if (vcid == XEN_HYPER_VCPU_ID_INVALID) { return NULL; } if ((vcca = xen_hyper_domain_to_vcpu_context_array(domain))) { ; } else if (!(vcca = xen_hyper_domid_to_vcpu_context_array(did))) { return NULL; } for (i = 0, vcc = vcca->context_array; i < vcca->context_array_cnt; i++, vcc++) { if (vcid == vcc->vcpu_id) { return vcc; } } return NULL; } /* * Get pointer of a vcpu context array from domain address. */ struct xen_hyper_vcpu_context_array * xen_hyper_domain_to_vcpu_context_array(ulong domain) { struct xen_hyper_domain_context *dc; if(!(dc = xen_hyper_domain_to_domain_context(domain))) { return NULL; } return dc->vcpu_context_array; } /* * Get pointer of a vcpu context array from domain id. */ struct xen_hyper_vcpu_context_array * xen_hyper_domid_to_vcpu_context_array(domid_t id) { struct xen_hyper_domain_context *dc; if (!(dc = xen_hyper_id_to_domain_context(id))) { return NULL; } return dc->vcpu_context_array; } /* * Store vcpu struct contents. 
*/ struct xen_hyper_vcpu_context * xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc, ulong vcpu, char *vcp) { vcc->vcpu = vcpu; vcc->vcpu_id = INT(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_id)); vcc->processor = INT(vcp + XEN_HYPER_OFFSET(vcpu_processor)); vcc->vcpu_info = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_info)); vcc->domain = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_domain)); vcc->next_in_list = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_next_in_list)); if (XEN_HYPER_VALID_MEMBER(vcpu_sleep_tick)) vcc->sleep_tick = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sleep_tick)); vcc->sched_priv = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_sched_priv)); vcc->state = INT(vcp + XEN_HYPER_OFFSET(vcpu_runstate) + XEN_HYPER_OFFSET(vcpu_runstate_info_state)); vcc->state_entry_time = ULONGLONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate) + XEN_HYPER_OFFSET(vcpu_runstate_info_state_entry_time)); vcc->runstate_guest = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_runstate_guest)); if (XEN_HYPER_VALID_MEMBER(vcpu_vcpu_flags)) vcc->vcpu_flags = ULONG(vcp + XEN_HYPER_OFFSET(vcpu_vcpu_flags)); else vcc->vcpu_flags = XEN_HYPER_VCPUF_ERROR; return vcc; } /* * Read vcpu struct from vcpu context. */ char * xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc) { return xen_hyper_fill_vcpu_struct(vcc->vcpu, xhvct->vcpu_struct); } /* * Read vcpu struct. */ char * xen_hyper_read_vcpu(ulong vcpu) { return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct); } /* * Read vcpu struct to verification. */ char * xen_hyper_read_vcpu_verify(ulong vcpu) { return xen_hyper_fill_vcpu_struct(vcpu, xhvct->vcpu_struct_verify); } /* * Fill vcpu struct. */ char * xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct) { if (!readmem(vcpu, KVADDR, vcpu_struct, XEN_HYPER_SIZE(vcpu), "fill_vcpu_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { error(WARNING, "cannot fill vcpu struct.\n"); return NULL; } return vcpu_struct; } /* * Allocate vcpu context arrays space. 
*/ void xen_hyper_alloc_vcpu_context_arrays_space(int domains) { struct xen_hyper_vcpu_context_array *vcca; if (xhvct->vcpu_context_arrays == NULL) { if (!(xhvct->vcpu_context_arrays = malloc(domains * sizeof(struct xen_hyper_vcpu_context_array)))) { error(FATAL, "cannot malloc context arrays (%d domains).", domains); } BZERO(xhvct->vcpu_context_arrays, domains * sizeof(struct xen_hyper_vcpu_context_array)); xhvct->vcpu_context_arrays_cnt = domains; } else if (domains > xhvct->vcpu_context_arrays_cnt) { if (!(xhvct->vcpu_context_arrays = realloc(xhvct->vcpu_context_arrays, domains * sizeof(struct xen_hyper_vcpu_context_array)))) { error(FATAL, "cannot realloc context arrays (%d domains).", domains); } vcca = xhvct->vcpu_context_arrays + domains; BZERO(vcca, (domains - xhvct->vcpu_context_arrays_cnt) * sizeof(struct xen_hyper_vcpu_context_array)); xhvct->vcpu_context_arrays_cnt = domains; } } /* * Allocate vcpu context space. */ void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus) { if (!vcpus) { if (vcca->context_array != NULL) { free(vcca->context_array); vcca->context_array = NULL; } vcca->context_array_cnt = vcpus; } else if (vcca->context_array == NULL) { if (!(vcca->context_array = malloc(vcpus * sizeof(struct xen_hyper_vcpu_context)))) { error(FATAL, "cannot malloc context array (%d vcpus).", vcpus); } vcca->context_array_cnt = vcpus; } else if (vcpus > vcca->context_array_cnt) { if (!(vcca->context_array = realloc(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context_array)))) { error(FATAL, "cannot realloc context array (%d vcpus).", vcpus); } vcca->context_array_cnt = vcpus; } vcca->context_array_valid = vcpus; BZERO(vcca->context_array, vcpus * sizeof(struct xen_hyper_vcpu_context)); } /* * Get pcpu context from pcpu id. 
*/ struct xen_hyper_pcpu_context * xen_hyper_id_to_pcpu_context(uint id) { if (xhpct->context_array == NULL) { return NULL; } if (!xen_hyper_test_pcpu_id(id)) { return NULL; } return &xhpct->context_array[id]; } /* * Get pcpu context from pcpu address. */ struct xen_hyper_pcpu_context * xen_hyper_pcpu_to_pcpu_context(ulong pcpu) { struct xen_hyper_pcpu_context *pcc; int i; uint cpuid; if (xhpct->context_array == NULL) { return NULL; } if (!pcpu) { return NULL; } for_cpu_indexes(i, cpuid) { pcc = &xhpct->context_array[cpuid]; if (pcpu == pcc->pcpu) { return pcc; } } return NULL; } /* * Store pcpu struct contents. */ struct xen_hyper_pcpu_context * xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, ulong pcpu, char *pcp) { pcc->pcpu = pcpu; pcc->processor_id = UINT(pcp + XEN_HYPER_OFFSET(cpu_info_processor_id)); pcc->guest_cpu_user_regs = (ulong)(pcpu + XEN_HYPER_OFFSET(cpu_info_guest_cpu_user_regs)); pcc->current_vcpu = ULONG(pcp + XEN_HYPER_OFFSET(cpu_info_current_vcpu)); return pcc; } /* * Store init_tss contents. */ struct xen_hyper_pcpu_context * xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc, ulong init_tss, char *tss) { int i; uint64_t *ist_p; pcc->init_tss = init_tss; if (machine_type("X86")) { pcc->sp.esp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_esp0)); } else if (machine_type("X86_64")) { pcc->sp.rsp0 = ULONG(tss + XEN_HYPER_OFFSET(tss_struct_rsp0)); ist_p = (uint64_t *)(tss + XEN_HYPER_OFFSET(tss_struct_ist)); for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++, ist_p++) { pcc->ist[i] = ULONG(ist_p); } } return pcc; } /* * Read pcpu struct. */ char * xen_hyper_read_pcpu(ulong pcpu) { return xen_hyper_fill_pcpu_struct(pcpu, xhpct->pcpu_struct); } /* * Fill pcpu struct. */ char * xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct) { if (!readmem(pcpu, KVADDR, pcpu_struct, XEN_HYPER_SIZE(cpu_info), "fill_pcpu_struct", ACTIVE() ? 
(RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { error(WARNING, "cannot fill pcpu_struct.\n"); return NULL; } return pcpu_struct; } /* * Allocate pcpu context space. */ void xen_hyper_alloc_pcpu_context_space(int pcpus) { if (xhpct->context_array == NULL) { if (!(xhpct->context_array = malloc(pcpus * sizeof(struct xen_hyper_pcpu_context)))) { error(FATAL, "cannot malloc context array (%d pcpus).", pcpus); } } BZERO(xhpct->context_array, pcpus * sizeof(struct xen_hyper_pcpu_context)); } /* * Fill cpu_data. */ char * xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86) { ulong cpu_data; if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address) return NULL; cpu_data = xht->cpu_data_address + XEN_HYPER_SIZE(cpuinfo_x86) * idx; if (!readmem(cpu_data, KVADDR, cpuinfo_x86, XEN_HYPER_SIZE(cpuinfo_x86), "cpu_data", RETURN_ON_ERROR)) { error(WARNING, "cannot read cpu_data.\n"); return NULL; } return cpuinfo_x86; } char * xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64) { ulong cpu_data; if (!xen_hyper_test_pcpu_id(idx) || !xht->cpu_data_address) return NULL; cpu_data = xen_hyper_per_cpu(xht->cpu_data_address, idx); if (!readmem(cpu_data, KVADDR, cpuinfo_ia64, XEN_HYPER_SIZE(cpuinfo_ia64), "cpu_data", RETURN_ON_ERROR)) { error(WARNING, "cannot read cpu_data.\n"); return NULL; } return cpuinfo_ia64; } /* * Return whether vcpu is crashing. */ int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc) { if (vcc == xht->crashing_vcc) return TRUE; return FALSE; } /* * Test whether cpu for pcpu id exists. */ int xen_hyper_test_pcpu_id(uint pcpu_id) { ulong *cpumask = xht->cpumask; uint i, j; if (pcpu_id == XEN_HYPER_PCPU_ID_INVALID || pcpu_id > XEN_HYPER_MAX_CPUS()) { return FALSE; } i = pcpu_id / (sizeof(ulong) * 8); j = pcpu_id % (sizeof(ulong) * 8); cpumask += i; if (*cpumask & (1UL << j)) { return TRUE; } else { return FALSE; } } /* * Calculate and return the uptime. 
*/ ulonglong xen_hyper_get_uptime_hyper(void) { ulong jiffies, tmp1, tmp2; ulonglong jiffies_64, wrapped; if (symbol_exists("jiffies_64")) { get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64); wrapped = (jiffies_64 & 0xffffffff00000000ULL); if (wrapped) { wrapped -= 0x100000000ULL; jiffies_64 &= 0x00000000ffffffffULL; jiffies_64 |= wrapped; jiffies_64 += (ulonglong)(300*machdep->hz); } else { tmp1 = (ulong)(uint)(-300*machdep->hz); tmp2 = (ulong)jiffies_64; jiffies_64 = (ulonglong)(tmp2 - tmp1); } } else if (symbol_exists("jiffies")) { get_symbol_data("jiffies", sizeof(long), &jiffies); jiffies_64 = (ulonglong)jiffies; } else { jiffies_64 = 0; /* hypervisor does not have uptime */ } return jiffies_64; } /* * Get cpu informatin around. */ void xen_hyper_get_cpu_info(void) { ulong addr, init_begin, init_end; ulong *cpumask; uint *cpu_idx; int i, j, cpus; XEN_HYPER_STRUCT_SIZE_INIT(cpumask_t, "cpumask_t"); if (symbol_exists("nr_cpu_ids")) get_symbol_data("nr_cpu_ids", sizeof(uint), &xht->max_cpus); else { init_begin = symbol_value("__init_begin"); init_end = symbol_value("__init_end"); addr = symbol_value("max_cpus"); if (addr >= init_begin && addr < init_end) xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8; else { get_symbol_data("max_cpus", sizeof(xht->max_cpus), &xht->max_cpus); if (XEN_HYPER_SIZE(cpumask_t) * 8 > xht->max_cpus) xht->max_cpus = XEN_HYPER_SIZE(cpumask_t) * 8; } } if (xht->cpumask) { free(xht->cpumask); } if((xht->cpumask = malloc(XEN_HYPER_SIZE(cpumask_t))) == NULL) { error(FATAL, "cannot malloc cpumask space.\n"); } addr = symbol_value("cpu_present_map"); if (!readmem(addr, KVADDR, xht->cpumask, XEN_HYPER_SIZE(cpumask_t), "cpu_present_map", RETURN_ON_ERROR)) { error(FATAL, "cannot read cpu_present_map.\n"); } if (xht->cpu_idxs) { free(xht->cpu_idxs); } if((xht->cpu_idxs = malloc(sizeof(uint) * XEN_HYPER_MAX_CPUS())) == NULL) { error(FATAL, "cannot malloc cpu_idxs space.\n"); } memset(xht->cpu_idxs, 0xff, sizeof(uint) * 
XEN_HYPER_MAX_CPUS()); for (i = cpus = 0, cpumask = xht->cpumask, cpu_idx = xht->cpu_idxs; i < (XEN_HYPER_SIZE(cpumask_t)/sizeof(ulong)); i++, cpumask++) { for (j = 0; j < sizeof(ulong) * 8; j++) { if (*cpumask & (1UL << j)) { *cpu_idx++ = i * sizeof(ulong) * 8 + j; cpus++; } } } xht->pcpus = cpus; } /* * Calculate the number of physical cpu for x86. */ int xen_hyper_x86_get_smp_cpus(void) { if (xht->pcpus) { return xht->pcpus; } xen_hyper_get_cpu_info(); return xht->pcpus; } /* * Calculate used memory size for x86. */ uint64_t xen_hyper_x86_memory_size(void) { ulong vaddr; if (machdep->memsize) { return machdep->memsize; } vaddr = symbol_value("total_pages"); if (!readmem(vaddr, KVADDR, &xht->total_pages, sizeof(xht->total_pages), "total_pages", RETURN_ON_ERROR)) { error(WARNING, "cannot read total_pages.\n"); } xht->sys_pages = xht->total_pages; machdep->memsize = (uint64_t)(xht->sys_pages) * (uint64_t)(machdep->pagesize); return machdep->memsize; } /* * Calculate the number of physical cpu for ia64. */ int xen_hyper_ia64_get_smp_cpus(void) { return xen_hyper_x86_get_smp_cpus(); } /* * Calculate used memory size for ia64. */ uint64_t xen_hyper_ia64_memory_size(void) { return xen_hyper_x86_memory_size(); } /* * Calculate and return the speed of the processor. */ ulong xen_hyper_ia64_processor_speed(void) { ulong mhz, proc_freq; if (machdep->mhz) return(machdep->mhz); mhz = 0; if (!xht->cpu_data_address || !XEN_HYPER_VALID_STRUCT(cpuinfo_ia64) || XEN_HYPER_INVALID_MEMBER(cpuinfo_ia64_proc_freq)) return (machdep->mhz = mhz); readmem(xen_hyper_per_cpu(xht->cpu_data_address, xht->cpu_idxs[0]) + XEN_HYPER_OFFSET(cpuinfo_ia64_proc_freq), KVADDR, &proc_freq, sizeof(ulong), "cpuinfo_ia64 proc_freq", FAULT_ON_ERROR); mhz = proc_freq/1000000; return (machdep->mhz = mhz); } /* * Print an aligned string with specified length. 
*/ void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag) { char buf[XEN_HYPER_CMD_BUFSIZE]; int sl, r; char *s1, *s2; sl = strlen(str1); if (sl > len) { r = 0; } else { r = len - sl; } memset(buf, ' ', sizeof(buf)); buf[r] = '\0'; if (flag & XEN_HYPER_PRI_L) { s1 = str1; s2 = buf; } else { s1 = buf; s2 = str1; } if (str2) { fprintf(fp, "%s%s%s", s1, s2, str2); } else { fprintf(fp, "%s%s", s1, s2); } if (flag & XEN_HYPER_PRI_LF) { fprintf(fp, "\n"); } } ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpuid) { struct xen_hyper_pcpu_context *pcc; struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vcc; int i, j; if (!xen_hyper_test_pcpu_id(pcpuid)) return 0; pcc = &xhpct->context_array[pcpuid]; if (pcc->current_vcpu) return pcc->current_vcpu; for (i = 0, vcca = xhvct->vcpu_context_arrays; i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { for (j = 0, vcc = vcca->context_array; j < vcca->context_array_cnt; j++, vcc++) { if (vcc->processor == pcpuid && vcc->state == XEN_HYPER_RUNSTATE_running) { return vcc->vcpu; } } } return 0; } ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu) { ulong vcpu; /* if pcpu is vcpu address, return it. */ if (pcpu & (~(PAGESIZE() - 1))) { return pcpu; } if(!(vcpu = XEN_HYPER_CURR_VCPU(pcpu))) error(FATAL, "invalid pcpu id\n"); return vcpu; } void xen_hyper_print_bt_header(FILE *out, ulong vcpu, int newline) { struct xen_hyper_vcpu_context *vcc; if (newline) fprintf(out, "\n"); vcc = xen_hyper_vcpu_to_vcpu_context(vcpu); if (!vcc) error(FATAL, "invalid vcpu\n"); fprintf(out, "PCPU: %2d VCPU: %lx\n", vcc->processor, vcpu); } #endif crash-7.1.4/s390.c0000775000000000000000000010046412634305150012174 0ustar rootroot/* s390.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2009-2010, 2012-2014 David Anderson * Copyright (C) 2002-2006, 2009-2010, 2012-2014 Red Hat, Inc. All rights reserved. 
* Copyright (C) 2005, 2006, 2010 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef S390 #include "defs.h" #define S390_WORD_SIZE 4 #define S390_ADDR_MASK 0x7fffffff #define S390_PMD_BASE_MASK (~((1UL<<6)-1)) #define S390_PT_BASE_MASK S390_PMD_BASE_MASK #define S390_PAGE_BASE_MASK (~((1UL<<12)-1)) /* Flags used in entries of page dirs and page tables. */ #define S390_PAGE_PRESENT 0x001 /* set: loaded in physical memory * clear: not loaded in physical mem */ #define S390_RO_S390 0x200 /* HW read-only */ #define S390_PAGE_INVALID 0x400 /* HW invalid */ #define S390_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ #define S390_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ #define S390_PTE_INVALID_MASK 0x80000900 #define S390_PTE_INVALID(x) ((x) & S390_PTE_INVALID_MASK) #define INT_STACK_SIZE STACKSIZE() // can be 4096 or 8192 #define KERNEL_STACK_SIZE STACKSIZE() // can be 4096 or 8192 #define LOWCORE_SIZE 4096 /* * declarations of static functions */ static void s390_print_lowcore(char*, struct bt_info*,int); static int s390_kvtop(struct task_context *, ulong, physaddr_t *, int); static int s390_uvtop(struct task_context *, ulong, physaddr_t *, int); static int s390_vtop(unsigned long, ulong, physaddr_t*, int); static ulong s390_vmalloc_start(void); static int s390_is_task_addr(ulong); static int s390_verify_symbol(const char *, ulong, char type); static ulong s390_get_task_pgd(ulong); static int s390_translate_pte(ulong, void *, ulonglong); static ulong s390_processor_speed(void); static int 
s390_eframe_search(struct bt_info *); static void s390_back_trace_cmd(struct bt_info *); static void s390_get_stack_frame(struct bt_info *, ulong *, ulong *); static int s390_dis_filter(ulong, char *, unsigned int); static void s390_cmd_mach(void); static int s390_get_smp_cpus(void); static void s390_display_machine_stats(void); static void s390_dump_line_number(ulong); static struct line_number_hook s390_line_number_hooks[]; static int s390_is_uvaddr(ulong, struct task_context *); /* * struct lowcore name (old: "_lowcore", new: "lowcore") */ static char *lc_struct; /* * Initialize member offsets */ static void s390_offsets_init(void) { if (STRUCT_EXISTS("lowcore")) lc_struct = "lowcore"; else lc_struct = "_lowcore"; if (MEMBER_EXISTS(lc_struct, "st_status_fixed_logout")) MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "st_status_fixed_logout"); else MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "psw_save_area"); } /* * Do all necessary machine-specific setup here. This is called several * times during initialization. 
*/ void s390_init(int when) { switch (when) { case PRE_SYMTAB: machdep->verify_symbol = s390_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); // machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = (char *)malloc(SEGMENT_TABLE_SIZE)) == NULL) error(FATAL, "cannot malloc pgd space."); machdep->pmd = machdep->pgd; if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: machdep->kvbase = 0; machdep->identity_map_base = 0; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = s390_is_uvaddr; machdep->eframe_search = s390_eframe_search; machdep->back_trace = s390_back_trace_cmd; machdep->processor_speed = s390_processor_speed; machdep->uvtop = s390_uvtop; machdep->kvtop = s390_kvtop; machdep->get_task_pgd = s390_get_task_pgd; machdep->get_stack_frame = s390_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = s390_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = s390_is_task_addr; machdep->dis_filter = s390_dis_filter; machdep->cmd_mach = s390_cmd_mach; machdep->get_smp_cpus = s390_get_smp_cpus; machdep->line_number_hooks = s390_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; vt->flags |= COMMON_VADDR; break; case POST_GDB: if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else 
machdep->nr_irqs = 0; machdep->vmalloc_start = s390_vmalloc_start; machdep->dump_irq = generic_dump_irq; if (!machdep->hz) machdep->hz = HZ; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; s390_offsets_init(); break; case POST_INIT: break; } } /* * Dump machine dependent information */ void s390_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: s390_eframe_search()\n"); fprintf(fp, " back_trace: s390_back_trace_cmd()\n"); fprintf(fp, " processor_speed: s390_processor_speed()\n"); fprintf(fp, " uvtop: s390_uvtop()\n"); fprintf(fp, " kvtop: s390_kvtop()\n"); fprintf(fp, " get_task_pgd: s390_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: s390_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: s390_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: s390_vmalloc_start()\n"); fprintf(fp, " is_task_addr: s390_is_task_addr()\n"); fprintf(fp, " verify_symbol: s390_verify_symbol()\n"); 
fprintf(fp, " dis_filter: s390_dis_filter()\n"); fprintf(fp, " cmd_mach: s390_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: s390_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: s390_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: s390_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } /* * Check if address is in context's address space */ static int s390_is_uvaddr(ulong vaddr, struct task_context *tc) { return IN_TASK_VMA(tc->task, vaddr); } /* * Translates a user virtual address to its physical address */ static int s390_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd_base,sizeof(long), "pgd_base",FAULT_ON_ERROR); return s390_vtop(pgd_base, vaddr, paddr, verbose); } /* * Translates a kernel virtual address to its physical address */ static int s390_kvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; if (!IS_KVADDR(vaddr)){ *paddr = 0; return FALSE; } if (!vt->vmalloc_start) { *paddr = VTOP(vaddr); return TRUE; } if (!IS_VMALLOC_ADDR(vaddr)) { *paddr = VTOP(vaddr); return TRUE; } pgd_base = (unsigned long)vt->kernel_pgd[0]; return 
s390_vtop(pgd_base, vaddr, paddr, verbose); } /* * Check if page is mapped */ static inline int s390_pte_present(unsigned long x) { if(THIS_KERNEL_VERSION >= LINUX(2,6,0)) { return !((x) & S390_PAGE_INVALID) || ((x) & S390_PAGE_INVALID_MASK)==S390_PAGE_INVALID_NONE; } else { return((x) & S390_PAGE_PRESENT); } } /* * page table traversal functions */ /* Segment table traversal function */ static ulong _kl_sg_table_deref_s390(ulong vaddr, ulong table, int len) { ulong offset, entry; offset = ((vaddr >> 20) & 0x7ffUL) * 4; if (offset >= (len + 1)*64) /* Offset is over the table limit. */ return 0; readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); /* * Check if the segment table entry could be read and doesn't have * any of the reserved bits set. */ if (entry & 0x80000000UL) return 0; /* Check if the segment table entry has the invalid bit set. */ if (entry & 0x40UL) return 0; /* Segment table entry is valid and well formed. */ return entry; } /* Page table traversal function */ static ulong _kl_pg_table_deref_s390(ulong vaddr, ulong table, int len) { ulong offset, entry; offset = ((vaddr >> 12) & 0xffUL) * 4; if (offset >= (len + 1)*64) /* Offset is over the table limit. */ return 0; readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); /* * Check if the page table entry could be read and doesn't have * any of the reserved bits set. */ if (entry & 0x80000900UL) return 0; /* Check if the page table entry has the invalid bit set. */ if (entry & 0x400UL) return 0; /* Page table entry is valid and well formed. */ return entry; } /* lookup virtual address in page tables */ static int s390_vtop(unsigned long table, ulong vaddr, physaddr_t *phys_addr, int verbose) { ulong entry, paddr; int len; /* * Get the segment table entry. 
* We assume that the segment table length field in the asce * is set to the maximum value of 127 (which translates to * a segment table with 2048 entries) and that the addressing * mode is 31 bit. */ entry = _kl_sg_table_deref_s390(vaddr, table, 127); if (!entry) return FALSE; table = entry & 0x7ffffc00UL; len = entry & 0xfUL; /* Get the page table entry */ entry = _kl_pg_table_deref_s390(vaddr, table, len); if (!entry) return FALSE; /* Isolate the page origin from the page table entry. */ paddr = entry & 0x7ffff000UL; /* Add the page offset and return the final value. */ *phys_addr = paddr + (vaddr & 0xfffUL); return TRUE; } /* * Determine where vmalloc'd memory starts. */ static ulong s390_vmalloc_start(void) { unsigned long highmem_addr,high_memory; highmem_addr=symbol_value("high_memory"); readmem(highmem_addr, PHYSADDR, &high_memory,sizeof(long), "highmem",FAULT_ON_ERROR); return high_memory; } /* * Check if address can be a valid task_struct */ static int s390_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * return MHz - unfortunately it is not possible to get this on linux * for zSeries */ static ulong s390_processor_speed(void) { return 0; } /* * Accept or reject a symbol from the kernel namelist. */ static int s390_verify_symbol(const char *name, ulong value, char type) { int i; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "startup") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled.")) return FALSE; /* reject L2^B symbols */ if (strstr(name, "L2\002") == name) return FALSE; /* throw away all symbols containing a '.' 
*/ for(i = 0; i < strlen(name);i++){ if(name[i] == '.') return FALSE; } return TRUE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong s390_get_task_pgd(ulong task) { return (error(FATAL, "s390_get_task_pgd: TBD\n")); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. */ static int s390_translate_pte(ulong pte, void *physaddr, ulonglong unused) { char *arglist[MAXARGS]; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; int c,len1,len2,len3; if(S390_PTE_INVALID(pte)){ fprintf(fp,"PTE is invalid\n"); return FALSE; } if(physaddr) *((ulong *)physaddr) = pte & S390_PAGE_BASE_MASK; if(!s390_pte_present(pte)){ swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|LJUST, "PTE"), mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); sprintf(ptebuf, "%lx", pte); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return FALSE; } fprintf(fp,"PTE PHYSICAL FLAGS\n"); fprintf(fp,"%08lx %08lx",pte, pte & S390_PAGE_BASE_MASK); fprintf(fp," ("); if(pte & S390_PAGE_INVALID) fprintf(fp,"INVALID "); if(pte & S390_RO_S390) fprintf(fp,"PROTECTION"); fprintf(fp,")"); return TRUE; } /* * Look for likely exception frames in a stack. 
*/ static int s390_eframe_search(struct bt_info *bt) { if(bt->flags & BT_EFRAME_SEARCH2) return (error(FATAL, "Option '-E' is not implemented for this architecture\n")); else return (error(FATAL, "Option '-e' is not implemented for this architecture\n")); } /* * returns cpu number of task */ static int s390_cpu_of_task(unsigned long task) { int cpu; if(VALID_MEMBER(task_struct_processor)){ /* linux 2.4 */ readmem(task + OFFSET(task_struct_processor),KVADDR, &cpu, sizeof(cpu), "task_struct_processor", FAULT_ON_ERROR); } else { char thread_info[8192]; unsigned long thread_info_addr; readmem(task + OFFSET(task_struct_thread_info),KVADDR, &thread_info_addr, sizeof(thread_info_addr), "thread info addr", FAULT_ON_ERROR); readmem(thread_info_addr,KVADDR,thread_info,sizeof(thread_info), "thread info", FAULT_ON_ERROR); cpu = *((int*) &thread_info[OFFSET(thread_info_cpu)]); } return cpu; } /* * returns true, if task of bt currently is executed by a cpu */ static int s390_has_cpu(struct bt_info *bt) { int cpu = bt->tc->processor; if (is_task_active(bt->task) && (kt->cpu_flags[cpu] & ONLINE_MAP)) return TRUE; else return FALSE; } /* * read lowcore for cpu */ static void s390_get_lowcore(int cpu, char* lowcore) { unsigned long lowcore_array,lowcore_ptr; lowcore_array = symbol_value("lowcore_ptr"); readmem(lowcore_array + cpu * S390_WORD_SIZE,KVADDR, &lowcore_ptr, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR); readmem(lowcore_ptr, KVADDR, lowcore, LOWCORE_SIZE, "lowcore", FAULT_ON_ERROR); } /* * Read interrupt stack (either "async_stack" or "panic_stack"); */ static void s390_get_int_stack(char *stack_name, char* lc, char* int_stack, unsigned long* start, unsigned long* end) { unsigned long stack_addr; if (!MEMBER_EXISTS(lc_struct, stack_name)) return; stack_addr = ULONG(lc + MEMBER_OFFSET(lc_struct, stack_name)); if (stack_addr == 0) return; readmem(stack_addr - INT_STACK_SIZE, KVADDR, int_stack, INT_STACK_SIZE, stack_name, FAULT_ON_ERROR); *start = stack_addr - INT_STACK_SIZE; 
*end = stack_addr; } /* * Unroll a kernel stack. */ static void s390_back_trace_cmd(struct bt_info *bt) { char* stack; char async_stack[INT_STACK_SIZE]; char panic_stack[INT_STACK_SIZE]; long ksp,backchain,old_backchain; int i=0, r14_offset,bc_offset,r14, skip_first_frame=0; unsigned long async_start = 0, async_end = 0; unsigned long panic_start = 0, panic_end = 0; unsigned long stack_end, stack_start, stack_base; char buf[BUFSIZE]; int cpu = bt->tc->processor; if (bt->hp && bt->hp->eip) { error(WARNING, "instruction pointer argument ignored on this architecture!\n"); } if (is_task_active(bt->task) && !(kt->cpu_flags[cpu] & ONLINE_MAP)) { fprintf(fp, " CPU offline\n"); return; } ksp = bt->stkptr; /* print lowcore and get async stack when task has cpu */ if(s390_has_cpu(bt)){ char lowcore[LOWCORE_SIZE]; unsigned long psw_flags; int cpu = s390_cpu_of_task(bt->task); if (ACTIVE()) { fprintf(fp,"(active)\n"); return; } s390_get_lowcore(cpu,lowcore); psw_flags = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area)); if(psw_flags & 0x10000UL){ fprintf(fp,"Task runs in userspace\n"); s390_print_lowcore(lowcore,bt,0); return; } s390_get_int_stack("async_stack", lowcore, async_stack, &async_start, &async_end); s390_get_int_stack("panic_stack", lowcore, panic_stack, &panic_start, &panic_end); s390_print_lowcore(lowcore,bt,1); fprintf(fp,"\n"); skip_first_frame=1; } /* get task stack start and end */ if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ readmem(bt->task + OFFSET(task_struct_thread_info),KVADDR, &stack_start, sizeof(long), "thread info", FAULT_ON_ERROR); } else { stack_start = bt->task; } stack_end = stack_start + KERNEL_STACK_SIZE; if(!STRUCT_EXISTS("stack_frame")){ r14_offset = 56; bc_offset=0; } else { r14_offset = MEMBER_OFFSET("stack_frame","gprs") + 8 * S390_WORD_SIZE; bc_offset = MEMBER_OFFSET("stack_frame","back_chain"); } backchain = ksp; do { unsigned long r14_stack_off; struct load_module *lm; int j; ulong offset; char *name_plus_offset; struct syment *sp; /* Find 
stack: Either async, panic stack or task stack */ if((backchain > stack_start) && (backchain < stack_end)){ stack = bt->stackbuf; stack_base = stack_start; } else if((backchain > async_start) && (backchain < async_end) && s390_has_cpu(bt)){ stack = async_stack; stack_base = async_start; } else if((backchain > panic_start) && (backchain < panic_end) && s390_has_cpu(bt)){ stack = panic_stack; stack_base = panic_start; } else { /* invalid stackframe */ break; } r14_stack_off=backchain - stack_base + r14_offset; r14 = ULONG(&stack[r14_stack_off]) & S390_ADDR_MASK; /* print function name */ if(BT_REFERENCE_CHECK(bt)){ if(bt->ref->cmdflags & BT_REF_HEXVAL){ if(r14 == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if(STREQ(closest_symbol(r14),bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } } else if(skip_first_frame){ skip_first_frame=0; } else { fprintf(fp," #%i [%08lx] ",i,backchain); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(r14, &offset); if (sp && offset) name_plus_offset = value_to_symstr(r14, buf, bt->radix); } fprintf(fp,"%s at %x", name_plus_offset ? 
name_plus_offset : closest_symbol(r14), r14); if (module_symbol(r14, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_LINE_NUMBERS) s390_dump_line_number(r14); i++; } old_backchain=backchain; backchain = ULONG(&stack[backchain - stack_base + bc_offset]); /* print stack content if -f is specified */ if((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)){ int frame_size; if(backchain == 0){ frame_size = stack_base - old_backchain + KERNEL_STACK_SIZE; } else { frame_size = MIN((backchain - old_backchain), (stack_base - old_backchain + KERNEL_STACK_SIZE)); } for(j=0; j< frame_size; j+=4){ if(j % 16 == 0){ fprintf(fp,"\n%08lx: ",old_backchain+j); } fprintf(fp," %s", format_stack_entry(bt, buf, ULONG(&stack[old_backchain - stack_base + j]), 0)); } fprintf(fp,"\n\n"); } /* Check for interrupt stackframe */ if((backchain == 0) && (stack == async_stack)){ unsigned long psw_flags,r15; psw_flags = ULONG(&stack[old_backchain - stack_base +96 +MEMBER_OFFSET("pt_regs","psw")]); if(psw_flags & 0x10000UL){ /* User psw: should not happen */ break; } r15 = ULONG(&stack[old_backchain - stack_base + 96 + MEMBER_OFFSET("pt_regs", "gprs") + 15 * S390_WORD_SIZE]); backchain=r15; fprintf(fp," - Interrupt -\n"); } } while(backchain != 0); } /* * print lowcore info (psw and all registers) */ static void s390_print_lowcore(char* lc, struct bt_info *bt, int show_symbols) { char* ptr; unsigned long tmp[4]; ptr = lc + OFFSET(s390_lowcore_psw_save_area); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); if(BT_REFERENCE_CHECK(bt)){ if(bt->ref->cmdflags & BT_REF_HEXVAL){ if(tmp[1] == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if(STREQ(closest_symbol(tmp[1]),bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } return; } fprintf(fp," LOWCORE INFO:\n"); fprintf(fp," -psw : %#010lx %#010lx\n", tmp[0], tmp[1]); if(show_symbols){ fprintf(fp," -function : %s at %lx\n", closest_symbol(tmp[1] & S390_ADDR_MASK), tmp[1] & S390_ADDR_MASK); if 
(bt->flags & BT_LINE_NUMBERS) s390_dump_line_number(tmp[1] & S390_ADDR_MASK); } ptr = lc + MEMBER_OFFSET(lc_struct, "cpu_timer_save_area"); tmp[0]=UINT(ptr); tmp[1]=UINT(ptr + S390_WORD_SIZE); fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); ptr = lc + MEMBER_OFFSET(lc_struct, "clock_comp_save_area"); tmp[0]=UINT(ptr); tmp[1]=UINT(ptr + S390_WORD_SIZE); fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); fprintf(fp," -general registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "gpregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -access registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "access_regs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], 
tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -control registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "cregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 12 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); ptr = lc + MEMBER_OFFSET(lc_struct, "floating_pt_save_area"); fprintf(fp," -floating point registers 0,2,4,6:\n"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 6 * S390_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0], tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2], tmp[3]); } /* * Get a stack frame combination of pc and ra from the most relevent spot. 
/*
 * Produce the starting stack pointer and instruction address for a
 * backtrace, taken from the most reliable spot available: the lowcore
 * register save areas when the task is running on a cpu, otherwise
 * the values saved at schedule time in the task/thread structures.
 */
static void
s390_get_stack_frame(struct bt_info *bt, ulong *eip, ulong *esp)
{
	unsigned long sp, ret_addr;
	int lr_off;
	char lc_buf[LOWCORE_SIZE];

	if (s390_has_cpu(bt))
		s390_get_lowcore(s390_cpu_of_task(bt->task), lc_buf);

	/* stack pointer */
	if (esp) {
		if (s390_has_cpu(bt))
			sp = ULONG(lc_buf + MEMBER_OFFSET(lc_struct,
				"gpregs_save_area") + (15 * S390_WORD_SIZE));
		else
			readmem(bt->task + OFFSET(task_struct_thread_ksp),
				KVADDR, &sp, sizeof(void *),
				"thread_struct ksp", FAULT_ON_ERROR);
		*esp = sp;
	} else {
		/* for 'bt -S' */
		sp = bt->hp->esp;
	}

	/* instruction address */
	if (!eip)
		return;

	if (s390_has_cpu(bt) && esp) {
		/* second word of the saved psw is the address */
		*eip = ULONG(lc_buf + OFFSET(s390_lowcore_psw_save_area) +
			S390_WORD_SIZE) & S390_ADDR_MASK;
		return;
	}

	if (STRUCT_EXISTS("stack_frame"))
		lr_off = MEMBER_OFFSET("stack_frame", "gprs") +
			8 * S390_WORD_SIZE;
	else
		lr_off = 56;	/* fixed r14 slot on older kernels */

	readmem(sp + lr_off, KVADDR, &ret_addr, sizeof(void *), "eip",
		FAULT_ON_ERROR);
	*eip = ret_addr & S390_ADDR_MASK;
}
*/ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int s390_get_smp_cpus(void) { return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Machine dependent command. */ void s390_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != EOF) { switch(c) { case 'c': fprintf(fp,"'-c' option is not implemented on this architecture\n"); return; case 'm': fprintf(fp,"'-m' option is not implemented on this architecture\n"); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); s390_display_machine_stats(); } /* * "mach" command output. 
*/ static void s390_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static const char *hook_files[] = { "arch/s390/kernel/entry.S", "arch/s390/kernel/head.S" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook s390_line_number_hooks[] = { {"startup",HEAD_S}, {"_stext",HEAD_S}, {"_pstart",HEAD_S}, {"system_call",ENTRY_S}, {"sysc_do_svc",ENTRY_S}, {"sysc_do_restart",ENTRY_S}, {"sysc_return",ENTRY_S}, {"sysc_sigpending",ENTRY_S}, {"sysc_restart",ENTRY_S}, {"sysc_singlestep",ENTRY_S}, {"sysc_tracesys",ENTRY_S}, {"ret_from_fork",ENTRY_S}, {"pgm_check_handler",ENTRY_S}, {"io_int_handler",ENTRY_S}, {"io_return",ENTRY_S}, {"ext_int_handler",ENTRY_S}, {"mcck_int_handler",ENTRY_S}, {"mcck_return",ENTRY_S}, {"restart_int_handler",ENTRY_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void s390_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) { fprintf(fp, GDB_PATCHED() ? 
"" : " (cannot determine file and line number)\n"); } else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } #endif crash-7.1.4/lkcd_v8.c0000664000000000000000000004646612634305150013040 0ustar rootroot/* lkcd_v8.c - core analysis suite * * Forward ported from lkcd_v7.c by Corey Mineyard * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002 Silicon Graphics, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #define LKCD_COMMON #include "defs.h" #include "lkcd_dump_v8.h" /* REMIND */ static dump_header_t dump_header_v8 = { 0 }; #ifndef HAVE_NO_DUMP_HEADER_ASM static dump_header_asm_t dump_header_asm_v8 = { 0 }; #endif static dump_page_t dump_page = { 0 }; static void mclx_cache_page_headers_v8(void); static off_t lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; #if defined(X86_64) int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { if (eip) *eip = dump_header_asm_v8.dha_smp_regs[cpu].rip; if (esp) *esp = dump_header_asm_v8.dha_smp_regs[cpu].rsp; return 0; } #elif defined(X86) int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { if (eip) *eip = dump_header_asm_v8.dha_smp_regs[cpu].eip; if (esp) *esp = dump_header_asm_v8.dha_smp_regs[cpu].esp; return 0; } #else int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { return -1; } #endif int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp) { int cpu = bt->tc->processor; if (!bt || !bt->tc) { fprintf(stderr, "get_lkcd_regs_for_cpu_v8: invalid tc " "(CPU=%d)\n", cpu); return -EINVAL; } if (cpu >= NR_CPUS) { fprintf(stderr, "get_lkcd_regs_for_cpu_v8, cpu (%d) too high\n", cpu); return -EINVAL; } return get_lkcd_regs_for_cpu_arch(cpu, eip, esp); } #ifndef HAVE_NO_DUMP_HEADER_ASM int lkcd_dump_init_v8_arch(dump_header_t *dh) { off_t ret_of; ssize_t ret_sz; uint32_t hdr_size, offset, nr_cpus; dump_header_asm_t arch_hdr; char *hdr_buf = NULL; ret_of = lseek(lkcd->fd, dh->dh_header_size + offsetof(dump_header_asm_t, dha_header_size), SEEK_SET); if (ret_of < 0) { perror("lseek failed in " __FILE__ ":" STR(__LINE__)); goto err; } ret_sz = read(lkcd->fd, (char *)&hdr_size, sizeof(hdr_size)); if (ret_sz != sizeof(hdr_size)) { perror("Reading hdr_size failed in " __FILE__ ":" STR(__LINE__)); goto err; } ret_of = lseek(lkcd->fd, dh->dh_header_size, SEEK_SET); if (ret_of < 0) { perror("lseek failed in " __FILE__ ":" STR(__LINE__)); goto err; } hdr_buf = (char 
*)malloc(hdr_size); if (!hdr_buf) { perror("Could not allocate memory for dump header\n"); goto err; } ret_sz = read(lkcd->fd, (char *)hdr_buf, hdr_size); if (ret_sz != hdr_size) { perror("Could not read header " __FILE__ ":" STR(__LINE__)); goto err; } /* * Though we have KL_NR_CPUS is 128, the header size is different * CONFIG_NR_CPUS might be different in the kernel. Hence, need * to find out how many CPUs are configured. */ offset = offsetof(dump_header_asm_t, dha_smp_regs[0]); nr_cpus = (hdr_size - offset) / sizeof(dump_CPU_info_t); /* check for CPU overflow */ if (nr_cpus > NR_CPUS) { fprintf(stderr, "CPU number too high %d (%s:%d)\n", nr_cpus, __FILE__, __LINE__); goto err; } /* parts that don't depend on the number of CPUs */ memcpy(&arch_hdr, (void *)hdr_buf, offset); /* registers */ memcpy(&arch_hdr.dha_smp_regs, (void *)&hdr_buf[offset], nr_cpus * sizeof(struct pt_regs)); offset += nr_cpus * sizeof(struct pt_regs); /* current task */ memcpy(&arch_hdr.dha_smp_current_task, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0]); /* stack */ memcpy(&arch_hdr.dha_stack, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_stack[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_stack[0]); /* stack_ptr */ memcpy(&arch_hdr.dha_stack_ptr, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0]); if (arch_hdr.dha_magic_number != DUMP_ASM_MAGIC_NUMBER) { fprintf(stderr, "Invalid magic number for x86_64\n"); goto err; } /* * read the kernel load address on IA64 -- other architectures have * no relocatable kernel at the lifetime of LKCD */ #ifdef IA64 memcpy(&arch_hdr.dha_kernel_addr, (void *)&hdr_buf[offset], sizeof(uint64_t)); #endif memcpy(&dump_header_asm_v8, &arch_hdr, sizeof(dump_header_asm_t)); return 0; err: free(hdr_buf); return -1; } #else /* architecture that has no lkcd_dump_init_v8 */ int 
lkcd_dump_init_v8_arch(dump_header_t *dh) { return 0; } #endif /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v8(FILE *fp, int fd, char *dumpfile) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; int dump_index_size ATTRIBUTE_UNUSED; int dump_index_created ATTRIBUTE_UNUSED; static char dumpfile_index_name[128]; int ifd ATTRIBUTE_UNUSED; uint64_t dh_dump_buffer_size; lkcd->fd = fd; lkcd->fp = fp; dump_index_created = 0; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v8; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9){ if (read(lkcd->fd, &dh_dump_buffer_size, sizeof(dh_dump_buffer_size)) != sizeof(dh_dump_buffer_size)) return FALSE; lkcd_offset_to_first_page = dh_dump_buffer_size; } else lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; lkcd->dump_page = dp; lkcd->dump_header = dh; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); if (lkcd_dump_init_v8_arch(dh) != 0) { fprintf(stderr, "Warning: Failed to initialise " "arch specific dump code\n"); } #ifdef IA64 if ( (fix_addr_v8(&dump_header_asm_v8) == -1) ) return FALSE; #endif /* * Allocate and clear the benchmark offsets, one per megabyte. */ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->total_pages = dh->dh_num_pages; /* * REMIND: dh_memory_size should be in physical pages and seems to be wrong. * pad by two for now; 3DFE8 should be 40000. 
*/ lkcd->memory_pages = dh->dh_memory_size; lkcd->page_offsets = 0; lkcd->ifd = -1; lkcd->dumpfile_index = NULL; /* Keep from getting unused warnings */ dump_index_size = 0; dump_index_created = 0; strcpy(dumpfile_index_name, dumpfile); ifd = 0; #ifdef LKCD_INDEX_FILE lkcd->memory_pages = dh->dh_memory_size * 2; dump_index_size = (lkcd->memory_pages * sizeof(off_t)); lkcd->page_offsets = 0; strcpy(dumpfile_index_name, dumpfile); lkcd->dumpfile_index = strcat(dumpfile_index_name, ".index"); ifd = open(lkcd->dumpfile_index, O_RDWR, 0644); if( ifd < 0 ) { int err; ifd = open(lkcd->dumpfile_index, (O_RDWR | O_CREAT), 0644); if (ifd > 0) { err = ftruncate(ifd, dump_index_size); if (err == -1) { lkcd->dumpfile_index = NULL; close(ifd); ifd = -1; } else { dump_index_created++; } } } if (ifd >= 0) { /* MAP_SHARED so we can sync the file */ lkcd->page_offsets = mmap( (void *)0, dump_index_size, (PROT_READ | PROT_WRITE), MAP_SHARED, ifd, (off_t)0); if (lkcd->page_offsets == MAP_FAILED) { close(ifd); ifd = -1; lkcd->dumpfile_index = NULL; lkcd->page_offsets = 0; } } lkcd->ifd = ifd; #endif lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->zoned_offsets = 0; lkcd->get_dp_flags = get_dp_flags_v8; lkcd->get_dp_address = get_dp_address_v8; lkcd->get_dp_size = get_dp_size_v8; lkcd->compression = dh->dh_dump_compress; lkcd->page_header_size = sizeof(dump_page_t); lseek(lkcd->fd, lkcd_offset_to_first_page, SEEK_SET); /* * Read all of the pages and save the page offsets for lkcd_lseek(). 
*/ for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: lkcd_print("reached EOF\n"); eof = TRUE; continue; } if (dp->dp_flags & ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|LKCD_DUMP_MCLX_V0)) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) lkcd->flags |= LKCD_MCLX; if (dp->dp_size > dh->dh_page_size) { lkcd_print("dp_size > %d: %d\n", dh->dh_page_size, dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_DH_END) { lkcd_print("found DUMP_DH_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(2)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (dh->dh_version & LKCD_DUMP_MCLX_V1) mclx_cache_page_headers_v8(); if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v8(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. 
 */
uint32_t
get_dp_flags_v8(void)
{
	dump_page_t *dp;

	/* lkcd->dump_page always holds the most recently read page header */
	dp = (dump_page_t *)lkcd->dump_page;
	return(dp->dp_flags);
}

/*
 * Return the current page's dp_address.
 */
uint64_t
get_dp_address_v8(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;
	return(dp->dp_address);
}

/*
 * help -S output, or as specified by arg.
 *
 * arg selects what is displayed: LKCD_DUMP_HEADER_ONLY shows just the
 * dump_header section, LKCD_DUMP_PAGE_ONLY just the current dump_page
 * section; any other value falls through and shows both.
 */
void
dump_lkcd_environment_v8(ulong arg)
{
	int others;
	dump_header_t *dh;
	dump_page_t *dp;
	struct timeval tv;

	dh = (dump_header_t *)lkcd->dump_header;
	dp = (dump_page_t *)lkcd->dump_page;

	if (arg == LKCD_DUMP_HEADER_ONLY)
		goto dump_header_only;
	if (arg == LKCD_DUMP_PAGE_ONLY)
		goto dump_page_only;

dump_header_only:
	lkcd_print(" dump_header:\n");
	lkcd_print(" dh_magic_number: ");
	/* BITS32() picks the format specifier matching the host word size */
	lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number);
	if (dh->dh_magic_number == DUMP_MAGIC_NUMBER)
		lkcd_print("(DUMP_MAGIC_NUMBER)\n");
	else if (dh->dh_magic_number == DUMP_MAGIC_LIVE)
		lkcd_print("(DUMP_MAGIC_LIVE)\n");
	else
		lkcd_print("(?)\n");

	/* "others" counts flags printed so far, gating the "|" separator */
	others = 0;
	lkcd_print(" dh_version: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version);
	switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK)
	{
	case LKCD_DUMP_V1:
		lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V2:
		lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V3:
		lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V5:
		lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V7:
		lkcd_print("%sLKCD_DUMP_V7", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V8:
		lkcd_print("%sLKCD_DUMP_V8", others++ ? "|" : "");
		break;
	case LKCD_DUMP_V9:
		lkcd_print("%sLKCD_DUMP_V9", others++ ? "|" : "");
		break;
	}
	if (dh->dh_version & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	if (dh->dh_version & LKCD_DUMP_MCLX_V1)
		lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_header_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size);
	lkcd_print(" dh_dump_level: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level);
	others = 0;
	if (dh->dh_dump_level & DUMP_LEVEL_HEADER)
		lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_KERN)
		lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_USED)
		lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : "");
	if (dh->dh_dump_level & DUMP_LEVEL_ALL)
		lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_page_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size);
	lkcd_print(" dh_memory_size: ");
	lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size);
	lkcd_print(" dh_memory_start: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start);
	lkcd_print(" dh_memory_end: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end);
	lkcd_print(" dh_num_pages: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages);
	/* avoid printing a second newline if the panic string carries one */
	lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string,
		dh && dh->dh_panic_string &&
		strstr(dh->dh_panic_string, "\n") ? "" : "\n");
	tv.tv_sec = dh->dh_time.tv_sec;
	lkcd_print(" dh_time: %s\n",
		strip_linefeeds(ctime(&(tv.tv_sec))));
	lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname);
	lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename);
	lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release);
	lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version);
	lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine);
	lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname);
	lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task);
	lkcd_print(" dh_dump_compress: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress);
	others = 0;
	/* DUMP_COMPRESS_NONE is tested with ==, not &; presumably a zero value */
	if (dh->dh_dump_compress == DUMP_COMPRESS_NONE)
		lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_RLE)
		lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? "|" : "");
	if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP)
		lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_flags: ");
	others = 0;
	lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags);
	if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT)
		lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : "");
	lkcd_print(")\n");
	lkcd_print(" dh_dump_device: ");
	lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device);

	if (arg == LKCD_DUMP_HEADER_ONLY)
		return;

dump_page_only:
	lkcd_print(" dump_page:\n");
	lkcd_print(" dp_address: ");
	lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address);
	lkcd_print(" dp_size: ");
	lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size);
	lkcd_print(" dp_flags: ");
	lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags);
	others = 0;
	if (dp->dp_flags & DUMP_DH_COMPRESSED)
		lkcd_print("DUMP_DH_COMPRESSED", others++);
	if (dp->dp_flags & DUMP_DH_RAW)
		lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (dp->dp_flags & DUMP_DH_END)
		lkcd_print("%sDUMP_DH_END", others++ ? "|" : "");
	if (dp->dp_flags & LKCD_DUMP_MCLX_V0)
		lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	lkcd_print(")\n");
}

/*
 * Console-only dump of a v8 page header: descriptive string s first,
 * then the fields of the dump_page_t passed via dpp.
 */
void
dump_dump_page_v8(char *s, void *dpp)
{
	dump_page_t *dp;
	uint32_t flags;
	int others;

	console(s);
	dp = (dump_page_t *)dpp;
	others = 0;

	console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ",
		dp->dp_address);
	console("dp_size: %ld ", dp->dp_size);
	console("dp_flags: %lx (", flags = dp->dp_flags);

	if (flags & DUMP_DH_COMPRESSED)
		console("DUMP_DH_COMPRESSED", others++);
	if (flags & DUMP_DH_RAW)
		console("%sDUMP_DH_RAW", others++ ? "|" : "");
	if (flags & DUMP_DH_END)
		console("%sDUMP_DH_END", others++ ? "|" : "");
	if (flags & LKCD_DUMP_MCLX_V0)
		console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : "");
	console(")\n");
}

/*
 * Read the MCLX-enhanced page header cache.
Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. */ static void mclx_cache_page_headers_v8(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; size_t dh_size; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; dh_size = sizeof(dump_header_t); if ((((dump_header_t *)lkcd->dump_header)->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9) dh_size += sizeof(uint64_t); if (lseek(lkcd->fd, dh_size, SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. */ if (lseek(lkcd->fd, page_headers[0] + lkcd_offset_to_first_page, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + lkcd_offset_to_first_page, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + lkcd_offset_to_first_page; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-7.1.4/lkcd_v1.c0000775000000000000000000002112612634305150013016 0ustar rootroot/* lkcd_v1.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
 * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define LKCD_COMMON
#include "defs.h"
#define CONFIG_VMDUMP
#include "lkcd_vmdump_v1.h"

/* static storage for the v1 dump header and the current page header */
static dump_header_t dump_header_v1 = { 0 };
static dump_page_t dump_page = { 0 };

/*
 * Verify and initialize the LKCD environment, storing the common data
 * in the global lkcd_environment structure.
 *
 * fp/fd: stream and descriptor of the open dumpfile; a NULL fp marks a
 * remote dump (LKCD_REMOTE).  Returns TRUE on success, FALSE on any
 * read or allocation failure.
 */
int
lkcd_dump_init_v1(FILE *fp, int fd)
{
	int i;
	int eof;
	uint32_t pgcnt;
	dump_header_t *dh;
	dump_page_t *dp;

	lkcd->fd = fd;
	lkcd->fp = fp;

	lseek(lkcd->fd, 0, SEEK_SET);

	dh = &dump_header_v1;
	dp = &dump_page;

	if (read(lkcd->fd, dh, sizeof(dump_header_t)) !=
	    sizeof(dump_header_t))
		return FALSE;

	lkcd->dump_header = dh;
	lkcd->dump_page = dp;
	if (lkcd->debug)
		dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY);

	/*
	 * Allocate and clear the benchmark offsets, one per megabyte.
	 */
	lkcd->page_size = dh->dh_page_size;
	lkcd->page_shift = ffs(lkcd->page_size) - 1;
	lkcd->bits = sizeof(long) * 8;
	lkcd->total_pages = dh->dh_num_pages;
	lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1;
	lkcd->page_header_size = sizeof(dump_page_t);

	lkcd->zone_shift = ffs(ZONE_SIZE) - 1;
	lkcd->zone_mask = ~(ZONE_SIZE - 1);
	lkcd->num_zones = 0;
	lkcd->max_zones = 0;

	/* v1 accessor callbacks; v1 dumps are RLE-compressed */
	lkcd->get_dp_flags = get_dp_flags_v1;
	lkcd->get_dp_address = get_dp_address_v1;
	lkcd->get_dp_size = get_dp_size_v1;
	lkcd->compression = LKCD_DUMP_COMPRESS_RLE;

	lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET);

	/*
	 * Sanity-check the first page header; with debug level 1 or
	 * higher, keep walking page headers until DUMP_END or EOF.
	 */
	for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) {
		switch (lkcd_load_dump_page_header(dp, pgcnt))
		{
		case LKCD_DUMPFILE_OK:
		case LKCD_DUMPFILE_END:
			break;

		case LKCD_DUMPFILE_EOF:
			eof = TRUE;
			continue;
		}

		if (!(dp->dp_flags & (DUMP_COMPRESSED|DUMP_RAW|DUMP_END))) {
			lkcd_print("unknown page flag in dump: %lx\n",
				dp->dp_flags);
		}
		if (dp->dp_size > 4096) {
			lkcd_print("dp_size > 4096: %d\n", dp->dp_size);
			dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY);
		}
		if (dp->dp_flags & DUMP_END) {
			lkcd_print("found DUMP_END\n");
			break;
		}

		lseek(lkcd->fd, dp->dp_size, SEEK_CUR);

		if (!LKCD_DEBUG(1))
			break;
	}

	/*
	 * Allocate space for LKCD_CACHED_PAGES data pages plus one to
	 * contain a copy of the compressed data of the current page.
	 */
	if ((lkcd->page_cache_buf = (char *)malloc
	    (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL)
		return FALSE;

	/*
	 * Clear the page data areas.
	 */
	lkcd_free_memory();
	for (i = 0; i < LKCD_CACHED_PAGES; i++) {
		lkcd->page_cache_hdr[i].pg_bufptr =
			&lkcd->page_cache_buf[i * dh->dh_page_size];
	}

	if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL)
		return FALSE;

	if ((lkcd->page_hash = (struct page_hash_entry *)calloc
	    (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL)
		return FALSE;

	/* trust the scanned count if the header's page count looks wrong */
	lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ?
		pgcnt : dh->dh_num_pages;
	lkcd->panic_task = (ulong)dh->dh_current_task;
	lkcd->panic_string = (char *)&dh->dh_panic_string[0];
	if (!fp)
		lkcd->flags |= LKCD_REMOTE;
	lkcd->flags |= LKCD_VALID;

	return TRUE;
}

/*
 * Return the current page's dp_size.
 */
uint32_t
get_dp_size_v1(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;
	return(dp->dp_size);
}

/*
 * Return the current page's dp_flags.
 */
uint32_t
get_dp_flags_v1(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;
	return(dp->dp_flags);
}

/*
 * Return the current page's dp_address.
 */
uint64_t
get_dp_address_v1(void)
{
	dump_page_t *dp;

	dp = (dump_page_t *)lkcd->dump_page;
	return(dp->dp_address);
}

/*
 * console-only output for info regarding current page.
 */
void
dump_dump_page_v1(char *s, void *dpp)
{
	dump_page_t *dp;
	uint32_t flags;
	int others;

	console(s);
	dp = (dump_page_t *)dpp;
	others = 0;

	console("dp_address: %llx ", dp->dp_address);
	console("dp_size: %ld ", dp->dp_size);
	console("dp_flags: %lx (", flags = dp->dp_flags);

	if (flags & DUMP_COMPRESSED)
		console("DUMP_COMPRESSED", others++);
	if (flags & DUMP_RAW)
		console("%sDUMP_RAW", others++ ? "|" : "");
	if (flags & DUMP_END)
		console("DUMP_END", others++ ? "|" : "");
	console(")\n");
}

/*
 * help -S output, or as specified by arg.
 */
void
dump_lkcd_environment_v1(ulong arg)
{
	int others;
	dump_header_t *dh;
	dump_page_t *dp;

	dh = (dump_header_t *)lkcd->dump_header;
	dp = (dump_page_t *)lkcd->dump_page;

	/*
	 * arg selects the section(s) displayed: header only, page only,
	 * or both (fall through from the header to the page section).
	 */
	if (arg == LKCD_DUMP_HEADER_ONLY)
		goto dump_header_only;
	if (arg == LKCD_DUMP_PAGE_ONLY)
		goto dump_page_only;

dump_header_only:
	lkcd_print(" dump_header:\n");
	lkcd_print(" dh_magic_number: %llx ", dh->dh_magic_number);
	if (dh->dh_magic_number == DUMP_MAGIC_NUMBER)
		lkcd_print("(DUMP_MAGIC_NUMBER)\n");
	else
		lkcd_print("(?)\n");
	lkcd_print(" dh_version: %d\n", dh->dh_version);
	lkcd_print(" dh_header_size: %d\n", dh->dh_header_size);
	lkcd_print(" dh_dump_level: %d\n", dh->dh_dump_level);
	lkcd_print(" dh_page_size: %d\n", dh->dh_page_size);
	lkcd_print(" dh_memory_size: %lld\n", dh->dh_memory_size);
	lkcd_print(" dh_memory_start: %llx\n", dh->dh_memory_start);
	lkcd_print(" dh_memory_end: %llx\n", dh->dh_memory_end);
	lkcd_print(" dh_esp: %lx\n", dh->dh_esp);
	lkcd_print(" dh_eip: %lx\n", dh->dh_eip);
	lkcd_print(" dh_num_pages: %d\n", dh->dh_num_pages);
	/* avoid printing a second newline if the panic string carries one */
	lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string,
		dh && dh->dh_panic_string &&
		strstr(dh->dh_panic_string, "\n") ? "" : "\n");
	lkcd_print(" dh_time: %s\n",
		strip_linefeeds(ctime(&(dh->dh_time.tv_sec))));
	lkcd_print(" dh_utsname:\n");
	lkcd_print(" sysname: %s\n", dh->dh_utsname.sysname);
	lkcd_print(" nodename: %s\n", dh->dh_utsname.nodename);
	lkcd_print(" release: %s\n", dh->dh_utsname.release);
	lkcd_print(" version: %s\n", dh->dh_utsname.version);
	lkcd_print(" machine: %s\n", dh->dh_utsname.machine);
	lkcd_print(" domainname: %s\n", dh->dh_utsname.domainname);
	lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task);
	lkcd_print(" dh_regs:\n");
	/* register layout is architecture-specific; only x86 is decoded here */
#ifdef PPC
	lkcd_print(" (PowerPC register display TBD)\n");
#endif
#ifdef X86
	lkcd_print(" ebx: %lx\n", dh->dh_regs.ebx);
	lkcd_print(" ecx: %lx\n", dh->dh_regs.ecx);
	lkcd_print(" edx: %lx\n", dh->dh_regs.edx);
	lkcd_print(" esi: %lx\n", dh->dh_regs.esi);
	lkcd_print(" edi: %lx\n", dh->dh_regs.edi);
	lkcd_print(" eax: %lx\n", dh->dh_regs.eax);
	lkcd_print(" xds: %x\n", dh->dh_regs.xds);
	lkcd_print(" xes: %x\n", dh->dh_regs.xes);
	lkcd_print(" orig_eax: %lx\n", dh->dh_regs.orig_eax);
	lkcd_print(" eip: %lx\n", dh->dh_regs.eip);
	lkcd_print(" xcs: %x\n", dh->dh_regs.xcs);
	lkcd_print(" eflags: %lx\n", dh->dh_regs.eflags);
	lkcd_print(" esp: %lx\n", dh->dh_regs.esp);
	lkcd_print(" xss: %x\n", dh->dh_regs.xss);
#endif

	if (arg == LKCD_DUMP_HEADER_ONLY)
		return;

dump_page_only:
	lkcd_print(" dump_page:\n");
	lkcd_print(" dp_address: %llx\n", dp->dp_address);
	lkcd_print(" dp_size: %ld\n", dp->dp_size);
	lkcd_print(" dp_flags: %lx (", dp->dp_flags);
	others = 0;
	if (dp->dp_flags & DUMP_COMPRESSED)
		lkcd_print("DUMP_COMPRESSED", others++);
	if (dp->dp_flags & DUMP_RAW)
		lkcd_print("%sDUMP_RAW", others++ ? "|" : "");
	if (dp->dp_flags & DUMP_END)
		lkcd_print("DUMP_END", others++);
	lkcd_print(")\n");
}
crash-7.1.4/crash.80000664000000000000000000005333412634305150012523 0ustar rootroot.\"
.de CO
\dB\\$1\fP \fI\\$2\fP
..
.TH CRASH 8
.SH NAME
crash \- Analyze Linux crash dump data or a live system
.SH SYNOPSIS
.B crash
[\fIOPTION\fR]...
\fINAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)\fR .br .B crash [\fIOPTION\fR]... \fI[NAMELIST] (live system form)\fR .SH DESCRIPTION .B Crash is a tool for interactively analyzing the state of the Linux system while it is running, or after a kernel crash has occurred and a core dump has been created by the .I netdump, .I diskdump, .I LKCD, .I kdump, .I xendump or .I kvmdump facilities. It is loosely based on the SVR4 UNIX crash command, but has been significantly enhanced by completely merging it with the .B gdb(1) debugger. The marriage of the two effectively combines the kernel-specific nature of the traditional UNIX crash utility with the source code level debugging capabilities of .B gdb(1). In the .I dumpfile form, both a NAMELIST and a MEMORY-IMAGE argument must be entered. In the .I live system form, the NAMELIST argument must be entered if the kernel's .I vmlinux file is not located in a known location, such as the .I /usr/lib/debug/lib/modules/ directory. The .B crash utility has also been extended to support the analysis of dumpfiles generated by a crash of the Xen hypervisor. In that case, the NAMELIST argument must be that of the .I xen-syms binary. Live system analysis is not supported for the Xen hypervisor. The .B crash utility command set consists of common kernel core analysis tools such as kernel stack back traces of all processes, source code disassembly, formatted kernel structure and variable displays, virtual memory data, dumps of linked-lists, etc., along with several commands that delve deeper into specific kernel subsystems. Appropriate .B gdb commands may also be entered, which in turn are passed on to the .B gdb module for execution. If desired, commands may be placed in either a .I $HOME/.crashrc file and/or in a .I .crashrc file in the current directory. During initialization, the commands in .I $HOME/.crashrc are executed first, followed by those in the .I ./.crashrc file. 
The
.B crash
utility is designed to be independent of Linux version dependencies.
When new kernel source code impacts the correct functionality of
.B crash
and its command set, the utility will be updated to recognize new
kernel code changes, while maintaining backwards compatibility with
earlier releases.
.SH OPTIONS
.de BS
\fB\\$1\fP\ \fR\\$2\fP
..
.TP
.BI NAMELIST
This is a pathname to an uncompressed kernel image (a
.I vmlinux
file), or a Xen hypervisor image (a
.I xen-syms
file) which has been compiled with the "-g" option.
If using the
.I dumpfile
form, a
.I vmlinux
file may be compressed in either gzip or bzip2 formats.
.TP
.BI MEMORY-IMAGE[@ADDRESS]
A kernel core dump file created by the
.I netdump,
.I diskdump,
.I LKCD,
.I kdump,
.I xendump
or
.I kvmdump
facilities.
If a MEMORY-IMAGE argument is not entered, the session will be invoked on
the live system, which typically requires root privileges because of
the device file used to access system RAM.
By default,
.I /dev/crash
will be used if it exists.
If it does not exist, then
.I /dev/mem
will be used; but if the kernel has been configured with
.B CONFIG_STRICT_DEVMEM,
then
.I /proc/kcore
will be used.
It is permissible to explicitly enter
.I /dev/crash,
.I /dev/mem
or
.I /proc/kcore.
An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile
is a raw RAM dumpfile that has no header information describing the file
contents.
Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered, with each
dumpfile containing a contiguous block of RAM, where the ADDRESS value
is the physical start address of the block expressed in hexadecimal.
The physical address value(s) will be used to create a temporary ELF
header in /var/tmp, which will only exist during the crash session.
.TP .BI mapfile If the NAMELIST file is not the same kernel that is running (live system form), or the kernel that was running when the system crashed (dumpfile form), then the .I System.map file of the original kernel should be entered on the command line. .P .BI -h \ [option] .br .BI \--help \ [option] .RS Without an .I option argument, display a .B crash usage help message. If the .I option argument is a .B crash command name, the help page for that command is displayed. If it is the string "input", a page describing the various .B crash command line input options is displayed. If it is the string "output", a page describing command line output options is displayed. If it is the string "all", then all of the possible help messages are displayed. After the help message is displayed, .B crash exits. .RE .TP .B \-s Silently proceed directly to the "crash>" prompt without displaying any version, GPL, or .B crash initialization data during startup, and by default, runtime command output is not passed to any scrolling command. .TP .BI \-i \ file Execute the command(s) contained in .I file prior to displaying the "crash>" prompt for interactive user input. .TP .BI \-d \ num Set the internal debug level. The higher the number, the more debugging data will be printed when .B crash initializes and runs. .TP .B \-S Use .I /boot/System.map as the .I mapfile\fP. .TP .B \-e \fIvi | emacs\fR Set the .B readline(3) command line editing mode to "vi" or "emacs". The default editing mode is "vi". .TP .B \-f Force the usage of a compressed .I vmlinux file if its original name does not start with "vmlinux". .TP .B \-k Indicate that the NAMELIST file is an LKCD "Kerntypes" debuginfo file. .TP .BI -g \ [namelist] Determine if a .I vmlinux or .I xen-syms namelist file contains debugging data. .TP .B \-t Display the system-crash timestamp and exit. .TP .B \-L Attempt to lock all of its virtual address space into memory by calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization. 
If the system call fails, an error message will be displayed, but the session continues. .TP .BI \-c \ tty-device Open the .I tty-device as the console used for debug messages. .TP .BI \-p \ page-size If a processor's page size cannot be determined by the dumpfile, and the processor default cannot be used, use .I page-size. .TP .BI \-o \ filename Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles, specifies a filename of a new ELF vmcore that will be created and used as the dumpfile. It will be saved to allow future use as a standalone vmcore, replacing the original raw RAM dumpfile. .P .B -m \fIoption=value\fR .br .B --machdep \fIoption=value\fR .RS Pass an option and value pair to machine-dependent code. These architecture-specific option/pairs should only be required in very rare circumstances: .P .nf X86_64: phys_base= irq_eframe_link= max_physmem_bits= vm=orig (pre-2.6.11 virtual memory address ranges) vm=2.6.11 (2.6.11 and later virtual memory address ranges) vm=xen (Xen kernel virtual memory address ranges) vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges) PPC64: vm=orig vm=2.6.14 (4-level page tables) IA64: phys_start= init_stack_size= vm=4l (4-level page tables) ARM: phys_base= ARM64: phys_offset= .fi .RE .TP .B \-x Automatically load extension modules from a particular directory. If a directory is specified in the .B CRASH_EXTENSIONS shell environment variable, then that directory will be used. Otherwise .I /usr/lib64/crash/extensions (64-bit architectures) or .I /usr/lib/crash/extensions (32-bit architectures) will be used; if they do not exist, then the .I ./extensions directory will be used. .TP .BI --active Track only the active task on each cpu. .TP .BI --buildinfo Display the crash binary's build date, the user ID of the builder, the hostname of the machine where the build was done, the target architecture, the version number, and the compiler version. 
.TP .BI --memory_module \ modname Use the .I modname as an alternative kernel module to the .I crash.ko module that creates the .I /dev/crash device. .TP .BI --memory_device \ device Use .I device as an alternative device to the .I /dev/crash, /dev/mem or .I /proc/kcore devices. .TP .BI --log \ dumpfile Dump the contents of the kernel log buffer. A kernel namelist argument is not necessary, but the dumpfile must contain the VMCOREINFO data taken from the original /proc/vmcore ELF header. .TP .B --no_kallsyms Do not use kallsyms-generated symbol information contained within kernel module object files. .TP .B --no_modules Do not access or display any kernel module related information. .TP .B --no_ikconf Do not attempt to read configuration data that was built into kernels configured with .B CONFIG_IKCONFIG. .TP .B --no_data_debug Do not verify the validity of all structure member offsets and structure sizes that it uses. .TP .B --no_kmem_cache Do not initialize the kernel's slab cache infrastructure, and commands that use kmem_cache-related data will not work. .TP .B --no_elf_notes Do not use the registers from the ELF NT_PRSTATUS notes saved in a compressed kdump header for backtraces. .TP .B --kmem_cache_delay Delay the initialization of the kernel's slab cache infrastructure until it is required by a run-time command. .TP .B --readnow Pass this flag to the embedded .B gdb module, which will override its two-stage strategy that it uses for reading symbol tables from the NAMELIST. .TP .B --smp Specify that the system being analyzed is an SMP kernel. .P .B -v .br .B --version .RS Display the version of the .B crash utility, the version of the embedded .B gdb module, GPL information, and copyright notices. .RE .TP .BI --cpus \ number Specify the .I number of cpus in the SMP system being analyzed. .TP .BI --osrelease \ dumpfile Display the OSRELEASE vmcoreinfo string from a kdump .I dumpfile header. .TP .BI --hyper Force the session to be that of a Xen hypervisor. 
.TP .BI --p2m_mfn \ pfn When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile is typically analyzed with either the Xen hypervisor or the dom0 kernel. It is also possible to analyze any of the guest domU kernels if the pfn_to_mfn_list_list .I pfn value of the guest kernel is passed on the command line along with its NAMELIST and the dumpfile. .TP .BI --xen_phys_start \ physical-address Supply the base physical address of the Xen hypervisor's text and static data for older xendump dumpfiles that did not pass that information in the dumpfile header. .TP .B --zero_excluded If the makedumpfile(8) facility has filtered a compressed kdump dumpfile to exclude various types of non-essential pages, or has marked a compressed or ELF kdump dumpfile as incomplete due to an ENOSPC or other error during its creation, any attempt to read missing pages will fail. With this flag, reads from any of those pages will return zero-filled memory. .TP .B --no_panic Do not attempt to find the task that was running when the kernel crashed. Set the initial context to that of the "swapper" task on cpu 0. .TP .B --more Use .I /bin/more as the command output scroller, overriding the default of .I /usr/bin/less and any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --less Use .I /usr/bin/less as the command output scroller, overriding any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --hex Set the default command output radix to 16, overriding the default radix of 10, and any radix settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --dec Set the default command output radix to 10, overriding any radix settings in either .I ./.crashrc or .I $HOME/.crashrc. This is the default radix setting. .TP .B --CRASHPAGER Use the output paging command defined in the .B CRASHPAGER shell environment variable, overriding any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --no_scroll Do not pass run-time command output to any scrolling command. 
.TP .B --no_strip Do not strip cloned kernel text symbol names. .TP .B --no_crashrc Do not execute the commands in either .I $HOME/.crashrc or .I ./.crashrc. .TP .BI --mod \ directory When loading the debuginfo data of kernel modules with the .I mod -S command, search for their object files in .I directory instead of in the standard location. .TP .BI --kaslr \ offset | auto If an x86_64 kernel was configured with .B CONFIG_RANDOMIZE_BASE, the offset value is equal to the difference between the symbol values compiled into the vmlinux file and their relocated KASLR values. If set to auto, the KASLR offset value will be automatically calculated. .TP .BI --reloc \ size When analyzing live x86 kernels that were configured with a .B CONFIG_PHYSICAL_START value that is larger than its .B CONFIG_PHYSICAL_ALIGN value, then it will be necessary to enter a relocation size equal to the difference between the two values. .TP .BI --hash \ count Set the number of internal hash queue heads used for list gathering and verification. The default count is 32768. .TP .B --minimal Bring up a session that is restricted to the .I log, dis, rd, sym, eval, set and .I exit commands. This option may provide a way to extract some minimal/quick information from a corrupted or truncated dumpfile, or in situations where one of the several kernel subsystem initialization routines would abort the .B crash session. .TP .BI --kvmhost \ [32|64] When examining an x86 KVM guest dumpfile, this option specifies that the KVM host that created the dumpfile was an x86 (32-bit) or an x86_64 (64-bit) machine, overriding the automatically determined value. .TP .BI --kvmio \ override the automatically-calculated KVM guest I/O hole size. .TP .BI --offline \ [show|hide] Show or hide command output that is related to offline cpus. The default setting is show. 
.SH COMMANDS Each .B crash command generally falls into one of the following categories: .TP .I Symbolic display Displays of kernel text/data, which take full advantage of the power of .B gdb to format and display data structures symbolically. .TP .I System state The majority of .B crash commands consist of a set of "kernel-aware" commands, which delve into various kernel subsystems on a system-wide or per-task basis. .TP .I Utility functions A set of useful helper commands serving various purposes, some simple, others quite powerful. .TP .I Session control Commands that control the .B crash session itself. .PP The following alphabetical list consists of a very simple overview of each .B crash command. However, since individual commands often have several options resulting in significantly different output, it is suggested that the full description of each command be viewed by executing .I crash\ -h\ \fI\fP, or during a .B crash session by simply entering .B \fIhelp command\fP. .TP .I * "pointer to" is shorthand for either the .I struct or .I union commands. It displays the contents of a kernel structure or union. .TP .I alias creates a single-word alias for a command. .TP .I ascii displays an ascii chart or translates a numeric value into its ascii components. .TP .I bt displays a task's kernel-stack backtrace. If it is given the .I \-a option, it displays the stack traces of the active tasks on all CPUs. It is often used with the .I foreach command to display the backtraces of all tasks with one command. .TP .I btop translates a byte value (physical offset) to its page number. .TP .I dev displays data concerning the character and block device assignments, I/O port usage, I/O memory usage, and PCI device data. .TP .I dis disassembles memory, either entire kernel functions, from a location for a specified number of instructions, or from the start of a function up to a specified memory location. 
.TP .I eval evaluates an expression or numeric type and displays the result in hexadecimal, decimal, octal and binary. .TP .I exit causes .B crash to exit. .TP .I extend dynamically loads or unloads .B crash shared object extension modules. .TP .I files displays information about open files in a context. .TP .I foreach repeats a specified command for the specified (or all) tasks in the system. .TP .I fuser displays the tasks using the specified file or socket. .TP .I gdb passes its argument to the embedded .B gdb module. It is useful for executing .B gdb commands that have the same name as .B crash commands. .TP .I help alone displays the command menu; if followed by a command name, a full description of a command, its options, and examples are displayed. Its output is far more complete and useful than this man page. .TP .I ipcs displays data about the System V IPC facilities. .TP .I irq displays data concerning interrupt request numbers and bottom-half interrupt handling. .TP .I kmem displays information about the use of kernel memory. .TP .I list displays the contents of a linked list. .TP .I log displays the kernel log_buf contents in chronological order. .TP .I mach displays data specific to the machine type. .TP .I mod displays information about the currently installed kernel modules, or adds or deletes symbolic or debugging information about specified kernel modules. .TP .I mount displays information about the currently-mounted filesystems. .TP .I net display various network related data. .TP .I p passes its arguments to the .B gdb "print" command for evaluation and display. .TP .I ps displays process status for specified, or all, processes in the system. .TP .I pte translates the hexadecimal contents of a PTE into its physical page address and page bit settings. .TP .I ptob translates a page frame number to its byte value. .TP .I ptov translates a hexadecimal physical address into a kernel virtual address. .TP .I q is an alias for the "exit" command. 
.TP .I rd displays the contents of memory, with the output formatted in several different manners. .TP .I repeat repeats a command indefinitely, optionally delaying a given number of seconds between each command execution. .TP .I runq displays the tasks on the run queue. .TP .I search searches a range of user or kernel memory space for given value. .TP .I set either sets a new context, or gets the current context for display. .TP .I sig displays signal-handling data of one or more tasks. .TP .I struct displays either a structure definition or the contents of a kernel structure at a specified address. .TP .I swap displays information about each configured swap device. .TP .I sym translates a symbol to its virtual address, or a static kernel virtual address to its symbol -- or to a symbol-plus-offset value, if appropriate. .TP .I sys displays system-specific data. .TP .I task displays the contents of a task_struct. .TP .I tree displays the contents of a red-black tree or a radix tree. .TP .I timer displays the timer queue entries, both old- and new-style, in chronological order. .TP .I union is similar to the .I struct command, except that it works on kernel unions. .TP .I vm displays basic virtual memory information of a context. .TP .I vtop translates a user or kernel virtual address to its physical address. .TP .I waitq walks the wait queue list displaying the tasks which are blocked on the specified wait queue. .TP .I whatis displays the definition of structures, unions, typedefs or text/data symbols. .TP .I wr modifies the contents of memory on a live system. It can only be used if .I /dev/mem is the device file being used to access system RAM, and should obviously be used with great care. .PP When .B crash is invoked with a Xen hypervisor binary as the NAMELIST, the command set is slightly modified. 
The .I *, alias, ascii, bt, dis, eval, exit, extend, .I gdb, help, list, log, p, pte, rd, repeat, .I search, set, struct, sym, sys, union, .I whatis, wr and .I q commands are the same as above. The following commands are specific to the Xen hypervisor: .TP .I domain displays the contents of the domain structure for selected, or all, domains. .TP .I doms displays domain status for selected, or all, domains. .TP .I dumpinfo displays Xen dump information for selected, or all, cpus. .TP .I pcpus displays physical cpu information for selected, or all, cpus. .TP .I vcpus displays vcpu status for selected, or all, vcpus. .SH FILES .TP .I .crashrc Initialization commands. The file can be located in the user's .B HOME directory and/or the current directory. Commands found in the .I .crashrc file in the .B HOME directory are executed before those in the current directory's .I .crashrc file. .SH ENVIRONMENT .TP .B EDITOR Command input is read using .BR readline(3). If .B EDITOR is set to .I emacs or .I vi then suitable keybindings are used. If .B EDITOR is not set, then .I vi is used. This can be overridden by .B set vi or .B set emacs commands located in a .IR .crashrc file, or by entering .B -e emacs on the .B crash command line. .TP .B CRASHPAGER If .B CRASHPAGER is set, its value is used as the name of the program to which command output will be sent. If not, then command output is sent to .B /usr/bin/less -E -X by default. .TP .B CRASH_MODULE_PATH Specifies an alternative directory tree to search for kernel module object files. .TP .B CRASH_EXTENSIONS Specifies a directory containing extension modules that will be loaded automatically if the .B -x command line option is used. .SH NOTES .PP If .B crash does not work, look for a newer version: kernel evolution frequently makes .B crash updates necessary. .PP The command .B set scroll off will cause output to be sent directly to the terminal rather than through a paging program. 
This is useful, for example, if you are running .B crash in a window of .BR emacs . .SH AUTHOR Dave Anderson wrote .B crash. .TP Jay Fenlason and Dave Anderson wrote this man page. .SH "SEE ALSO" .PP The .I help command within .B crash provides more complete and accurate documentation than this man page. .PP .I http://people.redhat.com/anderson - the home page of the .B crash utility. .PP .BR netdump (8), .BR gdb (1), .BR makedumpfile(8) crash-7.1.4/README0000644000000000000000000003625112634305150012207 0ustar rootroot CORE ANALYSIS SUITE The core analysis suite is a self-contained tool that can be used to investigate either live systems, kernel core dumps created from dump creation facilities such as kdump, kvmdump, xendump, the netdump and diskdump packages offered by Red Hat, the LKCD kernel patch, the mcore kernel patch created by Mission Critical Linux, as well as other formats created by manufacturer-specific firmware. o The tool is loosely based on the SVR4 crash command, but has been completely integrated with gdb in order to be able to display formatted kernel data structures, disassemble source code, etc. o The current set of available commands consist of common kernel core analysis tools such as a context-specific stack traces, source code disassembly, kernel variable displays, memory display, dumps of linked-lists, etc. In addition, any gdb command may be entered, which in turn will be passed onto the gdb module for execution. o There are several commands that delve deeper into specific kernel subsystems, which also serve as templates for kernel developers to create new commands for analysis of a specific area of interest. Adding a new command is a simple affair, and a quick recompile adds it to the command menu. o The intent is to make the tool independent of Linux version dependencies, building in recognition of major kernel code changes so as to adapt to new kernel versions, while maintaining backwards compatibility. 
A whitepaper with complete documentation concerning the use of this utility can be found here: http://people.redhat.com/anderson/crash_whitepaper These are the current prerequisites: o At this point, x86, ia64, x86_64, ppc64, ppc, arm, arm64, alpha, mips, s390 and s390x-based kernels are supported. Other architectures may be addressed in the future. o One size fits all -- the utility can be run on any Linux kernel version version dating back to 2.2.5-15. A primary design goal is to always maintain backwards-compatibility. o In order to contain debugging data, the top-level kernel Makefile's CFLAGS definition must contain the -g flag. Typically distributions will contain a package containing a vmlinux file with full debuginfo data. If not, the kernel must be rebuilt: For 2.2 kernels that are not built with -g, change the following line: CFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer to: CFLAGS = -g -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer For 2.4 kernels that are not built with -g, change the following line: CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing to: CFLAGS := -g $(CPPFLAGS) -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing For 2.6 and later kernels that are not built with -g, the kernel should be configured with CONFIG_DEBUG_INFO enabled, which in turn will add the -g flag to the CFLAGS setting in the kernel Makefile. After the kernel is re-compiled, the uncompressed "vmlinux" kernel that is created in the top-level kernel build directory must be saved. To build the crash utility: $ tar -xf crash-7.1.4.tar.gz $ cd crash-7.1.4 $ make The initial build will take several minutes because the embedded gdb module must be configured and built. Alternatively, the crash source RPM file may be installed and built, and the resultant crash binary RPM file installed. The crash binary can only be used on systems of the same architecture as the host build system. 
There are a few optional manners of building the crash binary: o On an x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit x86 dumpfiles may be built by typing "make target=X86". o On an x86 or x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit arm dumpfiles may be built by typing "make target=ARM". o On an x86 or x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit mips dumpfiles may be built by typing "make target=MIPS". o On an ppc64 host, a 32-bit ppc binary that can be used to analyze 32-bit ppc dumpfiles may be built by typing "make target=PPC". o On an x86_64 host, an x86_64 binary that can be used to analyze arm64 dumpfiles may be built by typing "make target=ARM64". Traditionally when vmcores are compressed via the makedumpfile(8) facility the libz compression library is used, and by default the crash utility only supports libz. Recently makedumpfile has been enhanced to optionally use either the LZO or snappy compression libraries. To build crash with either or both of those libraries, type "make lzo" or "make snappy". All of the alternate build commands above are "sticky" in that the special "make" targets only have to be entered one time; all subsequent builds will follow suit. If the tool is run against a kernel dumpfile, two arguments are required, the uncompressed kernel name and the kernel dumpfile name. If run on a live system, only the kernel name is required, because /dev/mem will be used as the "dumpfile". On Red Hat or Fedora kernels where the /dev/mem device is restricted, the /dev/crash memory driver will be used. If neither /dev/mem or /dev/crash are available, then /proc/kcore will be be used as the live memory source. If /proc/kcore is also restricted, then the Red Hat /dev/crash driver may be compiled and installed; its source is included in the crash-7.1.4/memory_driver subdirectory. 
If the kernel file is stored in /boot, /, /boot/efi, or in any /usr/src or /usr/lib/debug/lib/modules subdirectory, then no command line arguments are required -- the first kernel found that matches /proc/version will be used as the namelist. For example, invoking crash on a live system would look like this: $ crash crash 7.1.4 Copyright (C) 2002-2015 Red Hat, Inc. Copyright (C) 2004, 2005, 2006, 2010 IBM Corporation Copyright (C) 1999-2006 Hewlett-Packard Co Copyright (C) 2005, 2006, 2011, 2012 Fujitsu Limited Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. Copyright (C) 2005, 2011 NEC Corporation Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Enter "help copying" to see the conditions. This program has absolutely no warranty. Enter "help warranty" for details. GNU gdb 7.6 Copyright 2013 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Type "show copying" and "show warranty" for details. This GDB was configured as "i686-pc-linux-gnu"... 
KERNEL: /boot/vmlinux DUMPFILE: /dev/mem CPUS: 1 DATE: Wed Dec 16 10:59:36 2015 UPTIME: 10 days, 22:55:18 LOAD AVERAGE: 0.08, 0.03, 0.01 TASKS: 42 NODENAME: ha2.mclinux.com RELEASE: 2.4.0-test10 VERSION: #11 SMP Thu Nov 4 15:09:25 EST 2000 MACHINE: i686 (447 MHz) MEMORY: 128 MB PID: 3621 COMMAND: "crash" TASK: c463c000 CPU: 0 STATE: TASK_RUNNING (ACTIVE) crash> help * files mach repeat timer alias foreach mod runq tree ascii fuser mount search union bt gdb net set vm btop help p sig vtop dev ipcs ps struct waitq dis irq pte swap whatis eval kmem ptob sym wr exit list ptov sys q extend log rd task crash version: 7.1.4 gdb version: 7.6 For help on any command above, enter "help ". For help on input options, enter "help input". For help on output options, enter "help output". crash> When run on a dumpfile, both the kernel namelist and dumpfile must be entered on the command line. For example, when run on a core dump created by the Red Hat netdump or diskdump facilities: $ crash vmlinux vmcore crash 7.1.4 Copyright (C) 2002-2015 Red Hat, Inc. Copyright (C) 2004, 2005, 2006, 2010 IBM Corporation Copyright (C) 1999-2006 Hewlett-Packard Co Copyright (C) 2005, 2006, 2011, 2012 Fujitsu Limited Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. Copyright (C) 2005, 2011 NEC Corporation Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Enter "help copying" to see the conditions. This program has absolutely no warranty. Enter "help warranty" for details. GNU gdb 7.6 Copyright 2013 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Type "show copying" and "show warranty" for details. 
This GDB was configured as "i686-pc-linux-gnu"... KERNEL: vmlinux DUMPFILE: vmcore CPUS: 4 DATE: Tue Mar 2 13:57:09 2004 UPTIME: 00:02:40 LOAD AVERAGE: 2.24, 0.96, 0.37 TASKS: 70 NODENAME: pro1.lab.boston.redhat.com RELEASE: 2.6.3-2.1.214.11smp VERSION: #1 SMP Tue Mar 2 10:58:27 EST 2004 MACHINE: i686 (2785 Mhz) MEMORY: 512 MB PANIC: "Oops: 0002 [#1]" (check log for details) PID: 0 COMMAND: "swapper" TASK: 22fa200 (1 of 4) [THREAD_INFO: 2356000] CPU: 0 STATE: TASK_RUNNING (PANIC) crash> The tool's environment is context-specific. On a live system, the default context is the command itself; on a dump the default context will be the task that panicked. The most commonly-used commands are: set - set a new task context by pid, task address, or cpu. bt - backtrace of the current context, or as specified with arguments. p - print the contents of a kernel variable. rd - read memory, which may be either kernel virtual, user virtual, or physical. ps - simple process listing. log - dump the kernel log_buf. struct - print the contents of a structure at a specified address. foreach - execute a command on all tasks, or those specified, in the system. Detailed help concerning the use of each of the commands in the menu above may be displayed by entering "help command", where "command" is one of those listed above. Rather than getting bogged down in details here, simply run the help command on each of the commands above. Note that many commands have multiple options so as to avoid the proliferation of command names. Command output may be piped to external commands or redirected to files. Enter "help output" for details. The command line history mechanism allows for command-line recall and command-line editing. Input files containing a set of crash commands may be substituted for command-line input. Enter "help input" for details. 
Note that a .crashrc file (or .rc if the name has been changed), may contain any number of "set" or "alias" commands -- see the help pages on those two commands for details. Lastly, if a command is entered that is not recognized, it is checked against the kernel's list of variables, structure, union or typedef names, and if found, the command is passed to "p", "struct", "union" or "whatis". That being the case, as long as a kernel variable/structure/union name is different than any of the current commands. (1) A kernel variable can be dumped by simply entering its name: crash> init_mm init_mm = $2 = { mmap = 0xc022d540, mmap_avl = 0x0, mmap_cache = 0x0, pgd = 0xc0101000, count = { counter = 0x6 }, map_count = 0x1, mmap_sem = { count = { counter = 0x1 }, waking = 0x0, wait = 0x0 }, context = 0x0, start_code = 0xc0000000, end_code = 0xc022b4c8, end_data = c0250388, ... (2) A structure or can be dumped simply by entering its name and address: crash> vm_area_struct c5ba3910 struct vm_area_struct { vm_mm = 0xc3ae3210, vm_start = 0x821b000, vm_end = 0x8692000, vm_next = 0xc5ba3890, vm_page_prot = { pgprot = 0x25 }, vm_flags = 0x77, vm_avl_height = 0x4, vm_avl_left = 0xc0499540, vm_avl_right = 0xc0499f40, vm_next_share = 0xc04993c0, vm_pprev_share = 0xc0499060, vm_ops = 0x0, vm_offset = 0x0, vm_file = 0x0, vm_pte = 0x0 } The crash utility has been designed to facilitate the task of adding new commands. New commands may be permanently compiled into the crash executable, or dynamically added during runtime using shared object files. To permanently add a new command to the crash executable's menu: 1. For a command named "xxx", put a reference to cmd_xxx() in defs.h. 2. Add cmd_xxx into the base_command_table[] array in global_data.c. 3. Write cmd_xxx(), putting it in one of the appropriate files. Look at the other commands for guidance on getting symbolic data, reading memory, displaying data, etc... 4. Recompile and run. 
Note that while the initial compile of crash, which configures and compiles the gdb module, takes several minutes, subsequent re-compiles to do such things as add new commands or fix bugs just takes a few seconds. Alternatively, you can create shared object library files consisting of crash command extensions, that can be dynamically linked into the crash executable during runtime or during initialization. This will allow the same shared object to be used with subsequent crash releases without having to re-merge the command's code into each new set of crash sources. The dynamically linked-in commands will automatically show up in the crash help menu. For details, enter "help extend" during runtime, or enter "crash -h extend" from the shell command line. crash-7.1.4/xen_dom0.c0000664000000000000000000001521112634305150013177 0ustar rootroot/* xen_dom0.c * * Copyright (C) 2015 David Anderson * Copyright (C) 2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: David Anderson */ #include "defs.h" #include "xen_dom0.h" static struct xen_kdump_data xen_kdump_data = { 0 }; static struct xen_kdump_data *xkd = &xen_kdump_data; void dump_xen_kdump_data(FILE *fp) { int i, others; fprintf(fp, " xen_kdump_data: %s\n", XEN_CORE_DUMPFILE() ? " " : "(unused)"); if (!XEN_CORE_DUMPFILE()) return; fprintf(fp, " flags: %lx (", xkd->flags); others = 0; if (xkd->flags & KDUMP_P2M_INIT) fprintf(fp, "%sKDUMP_P2M_INIT", others++ ? "|" : ""); if (xkd->flags & KDUMP_CR3) fprintf(fp, "%sKDUMP_CR3", others++ ? 
"|" : ""); if (xkd->flags & KDUMP_MFN_LIST) fprintf(fp, "%sKDUMP_MFN_LIST", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " p2m_mfn: %lx\n", xkd->p2m_mfn); fprintf(fp, " cr3: %lx\n", xkd->cr3); fprintf(fp, " last_mfn_read: %lx\n", xkd->last_mfn_read); fprintf(fp, " last_pmd_read: %lx\n", xkd->last_pmd_read); fprintf(fp, " page: %lx\n", (ulong)xkd->page); fprintf(fp, " accesses: %ld\n", xkd->accesses); fprintf(fp, " cache_hits: %ld ", xkd->cache_hits); if (xkd->accesses) fprintf(fp, "(%ld%%)", xkd->cache_hits * 100 / xkd->accesses); fprintf(fp, "\n p2m_frames: %d\n", xkd->p2m_frames); fprintf(fp, " xen_phys_start: %lx\n", xkd->xen_phys_start); fprintf(fp, " xen_major_version: %d\n", xkd->xen_major_version); fprintf(fp, " xen_minor_version: %d\n", xkd->xen_minor_version); fprintf(fp, " p2m_mfn_frame_list: %lx\n", (ulong)xkd->p2m_mfn_frame_list); for (i = 0; i < xkd->p2m_frames; i++) fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); if (i) fprintf(fp, "\n"); } void process_xen_note(ulong type, void *data, size_t sz) { ulong *up = (ulong*) data; unsigned words = sz / sizeof(ulong); pc->flags |= XEN_CORE; xkd->last_mfn_read = UNINITIALIZED; xkd->last_pmd_read = UNINITIALIZED; if (type == NT_XEN_KDUMP_CR3) error(WARNING, "obsolete Xen n_type: %lx (NT_XEN_KDUMP_CR3)\n\n", type); if (type == NT_XEN_KDUMP_CR3 && words == 1) { xkd->flags |= KDUMP_CR3; /* * Use the first cr3 found. */ if (!xkd->cr3) xkd->cr3 = *up; } else { xkd->flags |= KDUMP_MFN_LIST; /* * If already set, overridden with --pfm_mfn */ if (!xkd->p2m_mfn) xkd->p2m_mfn = up[words-1]; if (words > 9 && !xkd->xen_phys_start) xkd->xen_phys_start = up[words-2]; xkd->xen_major_version = up[0]; xkd->xen_minor_version = up[1]; } } /* * Override the dom0 p2m mfn in the XEN_ELFNOTE_CRASH_INFO note * in order to initiate a crash session of a guest kernel. 
*/ void xen_kdump_p2m_mfn(char *arg) { ulong value; int errflag; errflag = 0; value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { xen_kdump_data.p2m_mfn = value; if (CRASHDEBUG(1)) error(INFO, "xen_kdump_data.p2m_mfn override: %lx\n", value); } else error(WARNING, "invalid p2m_mfn argument: %s\n", arg); } /* * Fujitsu dom0/HV sadump-generated dumpfile, which requires * the --p2m_mfn command line argument. */ int is_sadump_xen(void) { if (xen_kdump_data.p2m_mfn) { if (!XEN_CORE_DUMPFILE()) { pc->flags |= XEN_CORE; xkd->last_mfn_read = UNINITIALIZED; xkd->last_pmd_read = UNINITIALIZED; xkd->flags |= KDUMP_MFN_LIST; } return TRUE; } return FALSE; } void set_xen_phys_start(char *arg) { ulong value; int errflag = 0; value = htol(arg, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) xen_kdump_data.xen_phys_start = value; else error(WARNING, "invalid xen_phys_start argument: %s\n", arg); } ulong xen_phys_start(void) { return xkd->xen_phys_start; } int xen_major_version(void) { return xkd->xen_major_version; } int xen_minor_version(void) { return xkd->xen_minor_version; } struct xen_kdump_data * get_xen_kdump_data(void) { return xkd; } /* * Translate a xen domain's pseudo-physical address into the * xen machine address. Since there's no compression involved, * just the last phys_to_machine_mapping[] page read is cached, * which essentially caches 1024 p2m translations. 
physaddr_t
xen_kdump_p2m(physaddr_t pseudo)
{
	ulong pfn, mfn_frame;
	ulong *mfnptr;
	ulong mfn_idx, frame_idx;
	physaddr_t paddr;

	/* Caller wants a raw machine address: pass it through untouched. */
	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
		return pseudo;

	/*
	 * One-time initialization: allocate the single-page read cache
	 * and let the architecture build the p2m mfn frame list.
	 */
	if (!(xkd->flags & KDUMP_P2M_INIT)) {
		if (!machdep->xen_kdump_p2m_create)
			error(FATAL,
			    "xen kdump dumpfiles not supported on this architecture\n");

		if ((xkd->page = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc xen kdump data page\n");

		if (!machdep->xen_kdump_p2m_create(xkd))
			error(FATAL,
			    "cannot create xen kdump pfn-to-mfn mapping\n");

		xkd->flags |= KDUMP_P2M_INIT;
	}

#ifdef IA64
	/* ia64 has its own p2m translation scheme. */
	return ia64_xen_kdump_p2m(xkd, pseudo);
#endif

	xkd->accesses++;

	/*
	 * Split the pfn into the frame of the p2m table holding its
	 * entry (mfn_idx) and the entry's slot within that frame
	 * (frame_idx).
	 */
	pfn = (ulong)BTOP(pseudo);
	mfn_idx = pfn / (PAGESIZE()/sizeof(ulong));
	frame_idx = pfn % (PAGESIZE()/sizeof(ulong));
	if (mfn_idx >= xkd->p2m_frames) {
		if (CRASHDEBUG(8))
			fprintf(fp,
			    "xen_kdump_p2m: paddr/pfn: %llx/%lx: "
			    "mfn_idx nonexistent\n",
				(ulonglong)pseudo, pfn);
		return P2M_FAILURE;
	}
	mfn_frame = xkd->p2m_mfn_frame_list[mfn_idx];

	/*
	 * Only the most recently read p2m frame is cached; re-read the
	 * page only on a cache miss.  The frame itself is addressed by
	 * machine address, so set XEN_MACHINE_ADDR around the readmem()
	 * to avoid recursing into this translation.
	 */
	if (mfn_frame == xkd->last_mfn_read)
		xkd->cache_hits++;
	else {
		int res;
		if (CRASHDEBUG(8))
			fprintf(fp,
			    "xen_kdump_p2m: paddr/pfn: %llx/%lx: "
			    "read mfn_frame: %llx\n",
				(ulonglong)pseudo, pfn, PTOB(mfn_frame));
		pc->curcmd_flags |= XEN_MACHINE_ADDR;
		res = readmem((physaddr_t)PTOB(mfn_frame),
		    PHYSADDR, xkd->page, PAGESIZE(),
		    "xen_kdump_p2m mfn frame", RETURN_ON_ERROR);
		pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
		if (!res)
			return P2M_FAILURE;
	}

	xkd->last_mfn_read = mfn_frame;

	/* Combine the translated frame with the original page offset. */
	mfnptr = ((ulong *)(xkd->page)) + frame_idx;
	paddr = (physaddr_t)PTOB((ulonglong)(*mfnptr));
	paddr |= PAGEOFFSET(pseudo);

	if (CRASHDEBUG(7))
		fprintf(fp,
		    "xen_kdump_p2m(%llx): mfn_idx: %ld frame_idx: %ld"
		    " mfn_frame: %lx mfn: %lx => %llx\n",
			(ulonglong)pseudo, mfn_idx, frame_idx,
			mfn_frame, *mfnptr, (ulonglong)paddr);

	return paddr;
}
crash-7.1.4/unwind_i.h0000664000000000000000000001476612634305150013315 0ustar  rootroot/*
 * Copyright (C) 2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang
 */
/*
 * unwind_i.h
 *
 * Copyright (C)
2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * arch/ia64/kernel/unwind_i.h (kernel-2.4.18-6.23) */ /* * Kernel unwind support. */ #define UNW_VER(x) ((x) >> 48) #define UNW_FLAG_MASK 0x0000ffff00000000 #define UNW_FLAG_OSMASK 0x0000f00000000000 #define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L) #define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L) #define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL) enum unw_register_index { /* primary unat: */ UNW_REG_PRI_UNAT_GR, UNW_REG_PRI_UNAT_MEM, /* register stack */ UNW_REG_BSP, /* register stack pointer */ UNW_REG_BSPSTORE, UNW_REG_PFS, /* previous function state */ UNW_REG_RNAT, /* memory stack */ UNW_REG_PSP, /* previous memory stack pointer */ /* return pointer: */ UNW_REG_RP, /* preserved registers: */ UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7, UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5, UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5, UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19, UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23, UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27, UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31, UNW_NUM_REGS }; struct unw_info_block { u64 header; u64 desc[0]; /* unwind descriptors */ /* personality routine and language-specific data follow behind descriptors */ }; struct unw_table_entry { u64 start_offset; u64 end_offset; u64 
info_offset; }; struct unw_table { struct unw_table *next; /* must be first member! */ const char *name; unsigned long gp; /* global pointer for this load-module */ unsigned long segment_base; /* base for offsets in the unwind table entries */ unsigned long start; unsigned long end; const struct unw_table_entry *array; unsigned long length; }; enum unw_where { UNW_WHERE_NONE, /* register isn't saved at all */ UNW_WHERE_GR, /* register is saved in a general register */ UNW_WHERE_FR, /* register is saved in a floating-point register */ UNW_WHERE_BR, /* register is saved in a branch register */ UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */ UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */ /* * At the end of each prologue these locations get resolved to * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively: */ UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */ UNW_WHERE_GR_SAVE /* register is saved in next general register */ }; #define UNW_WHEN_NEVER 0x7fffffff struct unw_reg_info { unsigned long val; /* save location: register number or offset */ enum unw_where where; /* where the register gets saved */ int when; /* when the register gets saved */ }; struct unw_reg_state { struct unw_reg_state *next; /* next (outer) element on state stack */ struct unw_reg_info reg[UNW_NUM_REGS]; /* register save locations */ }; struct unw_labeled_state { struct unw_labeled_state *next; /* next labeled state (or NULL) */ unsigned long label; /* label for this state */ struct unw_reg_state saved_state; }; struct unw_state_record { unsigned int first_region : 1; /* is this the first region? */ unsigned int done : 1; /* are we done scanning descriptors? */ unsigned int any_spills : 1; /* got any register spills? */ unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? 
*/ unsigned long flags; /* see UNW_FLAG_* in unwind.h */ u8 *imask; /* imask of spill_mask record or NULL */ unsigned long pr_val; /* predicate values */ unsigned long pr_mask; /* predicate mask */ long spill_offset; /* psp-relative offset for spill base */ int region_start; int region_len; int epilogue_start; int epilogue_count; int when_target; u8 gr_save_loc; /* next general register to use for saving a register */ u8 return_link_reg; /* branch register in which the return link is passed */ struct unw_labeled_state *labeled_states; /* list of all labeled states */ struct unw_reg_state curr; /* current state */ }; enum unw_nat_type { UNW_NAT_NONE, /* NaT not represented */ UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */ UNW_NAT_MEMSTK, /* NaT value is in unat word at offset OFF */ UNW_NAT_REGSTK /* NaT is in rnat */ }; enum unw_insn_opcode { UNW_INSN_ADD, /* s[dst] += val */ UNW_INSN_ADD_PSP, /* s[dst] = (s.psp + val) */ UNW_INSN_ADD_SP, /* s[dst] = (s.sp + val) */ UNW_INSN_MOVE, /* s[dst] = s[val] */ UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */ UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */ UNW_INSN_SETNAT_MEMSTK, /* s[dst+1].nat.type = MEMSTK; s[dst+1].nat.off = *s.pri_unat - s[dst] */ UNW_INSN_SETNAT_TYPE, /* s[dst+1].nat.type = val */ UNW_INSN_LOAD, /* s[dst] = *s[val] */ UNW_INSN_MOVE_SCRATCH, /* s[dst] = scratch reg "val" */ }; struct unw_insn { unsigned int opc : 4; unsigned int dst : 9; signed int val : 19; }; /* * Preserved general static registers (r2-r5) give rise to two script * instructions; everything else yields at most one instruction; at * the end of the script, the psp gets popped, accounting for one more * instruction. 
*/ #define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5) struct unw_script { unsigned long ip; /* ip this script is for */ unsigned long pr_mask; /* mask of predicates script depends on */ unsigned long pr_val; /* predicate values this script is for */ #ifndef REDHAT rwlock_t lock; #endif /* !REDHAT */ unsigned int flags; /* see UNW_FLAG_* in unwind.h */ #ifndef REDHAT unsigned short lru_chain; /* used for least-recently-used chain */ unsigned short coll_chain; /* used for hash collisions */ unsigned short hint; /* hint for next script to try (or -1) */ #endif /* !REDHAT */ unsigned short count; /* number of instructions in script */ struct unw_insn insn[UNW_MAX_SCRIPT_LEN]; }; crash-7.1.4/task.c0000775000000000000000000101215512634305150012440 0ustar rootroot/* task.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" static ulong get_panic_context(void); static int sort_by_pid(const void *, const void *); static void show_ps(ulong, struct psinfo *); static struct task_context *panic_search(void); static void allocate_task_space(int); static void refresh_fixed_task_table(void); static void refresh_unlimited_task_table(void); static void refresh_pidhash_task_table(void); static void refresh_pid_hash_task_table(void); static void refresh_hlist_task_table(void); static void refresh_hlist_task_table_v2(void); static void refresh_hlist_task_table_v3(void); static void refresh_active_task_table(void); static struct task_context *store_context(struct task_context *, ulong, char *); static void refresh_context(ulong, ulong); static ulong parent_of(ulong); static void parent_list(ulong); static void child_list(ulong); static void initialize_task_state(void); static void dump_task_states(void); static void show_ps_data(ulong, struct task_context *, struct psinfo *); static void show_task_times(struct task_context *, ulong); static void show_task_args(struct task_context *); static void show_task_rlimit(struct task_context *); static void show_tgid_list(ulong); static int compare_start_time(const void *, const void *); static int start_time_timespec(void); static ulonglong convert_start_time(ulonglong, ulonglong); static ulong get_dumpfile_panic_task(void); static ulong get_active_set_panic_task(void); static void populate_panic_threads(void); static int verify_task(struct task_context *, int); static ulong get_idle_task(int, char *); static ulong get_curr_task(int, char *); static long rq_idx(int); static long cpu_idx(int); static void dump_runq(void); static void dump_on_rq_timestamp(void); static void dump_on_rq_milliseconds(void); static void dump_runqueues(void); static void dump_prio_array(int, ulong, char *); static void dump_task_runq_entry(struct task_context *, int); static void print_group_header_fair(int, ulong, void *); static void 
print_parent_task_group_fair(void *, int); static int dump_tasks_in_lower_dequeued_cfs_rq(int, ulong, int, struct task_context *); static int dump_tasks_in_cfs_rq(ulong); static int dump_tasks_in_task_group_cfs_rq(int, ulong, int, struct task_context *); static void dump_on_rq_tasks(void); static void cfs_rq_offset_init(void); static void task_group_offset_init(void); static void dump_CFS_runqueues(void); static void print_group_header_rt(ulong, void *); static void print_parent_task_group_rt(void *, int); static int dump_tasks_in_lower_dequeued_rt_rq(int, ulong, int); static int dump_RT_prio_array(ulong, char *); static void dump_tasks_in_task_group_rt_rq(int, ulong, int); static char *get_task_group_name(ulong); static void sort_task_group_info_array(void); static void print_task_group_info_array(void); static void reuse_task_group_info_array(void); static void free_task_group_info_array(void); static void fill_task_group_info_array(int, ulong, char *, int); static void dump_tasks_by_task_group(void); static void task_struct_member(struct task_context *,unsigned int, struct reference *); static void signal_reference(struct task_context *, ulong, struct reference *); static void do_sig_thread_group(ulong); static void dump_signal_data(struct task_context *, ulong); #define TASK_LEVEL (0x1) #define THREAD_GROUP_LEVEL (0x2) #define TASK_INDENT (0x4) static int sigrt_minmax(int *, int *); static void signame_list(void); static void sigqueue_list(ulong); static ulonglong task_signal(ulong, ulong*); static ulonglong task_blocked(ulong); static void translate_sigset(ulonglong); static ulonglong sigaction_mask(ulong); static int task_has_cpu(ulong, char *); static int is_foreach_keyword(char *, int *); static void foreach_cleanup(void *); static void ps_cleanup(void *); static char *task_pointer_string(struct task_context *, ulong, char *); static int panic_context_adjusted(struct task_context *tc); static void show_last_run(struct task_context *, struct psinfo *); 
static void show_milliseconds(struct task_context *, struct psinfo *); static char *translate_nanoseconds(ulonglong, char *); static int sort_by_last_run(const void *arg1, const void *arg2); static void sort_context_array_by_last_run(void); static void show_ps_summary(ulong); static void irqstacks_init(void); static void parse_task_thread(int argcnt, char *arglist[], struct task_context *); /* * Figure out how much space will be required to hold the task context * data, malloc() it, and call refresh_task_table() to fill it up. * Gather a few key offset and size values. Lastly, get, and then set, * the initial context. */ void task_init(void) { long len; int dim, task_struct_size; struct syment *nsp; long tss_offset, thread_offset; long eip_offset, esp_offset, ksp_offset; struct gnu_request req; ulong active_pid; if (!(tt->idle_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc idle_threads array"); if (DUMPFILE() && !(tt->panic_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc panic_threads array"); if (kernel_symbol_exists("nr_tasks")) { /* * Figure out what maximum NR_TASKS would be by getting the * address of the next symbol after "task". */ tt->task_start = symbol_value("task"); if ((nsp = next_symbol("task", NULL)) == NULL) error(FATAL, "cannot determine size of task table\n"); tt->flags |= TASK_ARRAY_EXISTS; tt->task_end = nsp->value; tt->max_tasks = (tt->task_end-tt->task_start) / sizeof(void *); allocate_task_space(tt->max_tasks); tss_offset = MEMBER_OFFSET_INIT(task_struct_tss, "task_struct", "tss"); eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "eip"); esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "esp"); ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp, "thread_struct", "ksp"); ASSIGN_OFFSET(task_struct_tss_eip) = (eip_offset == INVALID_OFFSET) ? 
INVALID_OFFSET : tss_offset + eip_offset; ASSIGN_OFFSET(task_struct_tss_esp) = (esp_offset == INVALID_OFFSET) ? INVALID_OFFSET : tss_offset + esp_offset; ASSIGN_OFFSET(task_struct_tss_ksp) = (ksp_offset == INVALID_OFFSET) ? INVALID_OFFSET : tss_offset + ksp_offset; tt->flags |= TASK_REFRESH; tt->refresh_task_table = refresh_fixed_task_table; readmem(tt->task_start, KVADDR, &tt->idle_threads[0], kt->cpus * sizeof(void *), "idle threads", FAULT_ON_ERROR); } else { /* * Make the task table big enough to hold what's running. * It can be realloc'd later if it grows on a live system. */ get_symbol_data("nr_threads", sizeof(int), &tt->nr_threads); tt->max_tasks = tt->nr_threads + NR_CPUS + TASK_SLUSH; allocate_task_space(tt->max_tasks); thread_offset = MEMBER_OFFSET_INIT(task_struct_thread, "task_struct", "thread"); eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "eip"); esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "esp"); /* * Handle x86/x86_64 merger. */ if (eip_offset == INVALID_OFFSET) eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "ip"); if (esp_offset == INVALID_OFFSET) esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "sp"); ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp, "thread_struct", "ksp"); ASSIGN_OFFSET(task_struct_thread_eip) = (eip_offset == INVALID_OFFSET) ? INVALID_OFFSET : thread_offset + eip_offset; ASSIGN_OFFSET(task_struct_thread_esp) = (esp_offset == INVALID_OFFSET) ? INVALID_OFFSET : thread_offset + esp_offset; ASSIGN_OFFSET(task_struct_thread_ksp) = (ksp_offset == INVALID_OFFSET) ? 
INVALID_OFFSET : thread_offset + ksp_offset; tt->flags |= TASK_REFRESH; tt->refresh_task_table = refresh_unlimited_task_table; get_idle_threads(&tt->idle_threads[0], kt->cpus); } if (MEMBER_EXISTS("task_struct", "thread_info")) MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", "thread_info"); else if (MEMBER_EXISTS("task_struct", "stack")) MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", "stack"); else ASSIGN_OFFSET(task_struct_thread_info) = INVALID_OFFSET; if (VALID_MEMBER(task_struct_thread_info)) { MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu"); MEMBER_OFFSET_INIT(thread_info_flags, "thread_info", "flags"); MEMBER_OFFSET_INIT(thread_info_previous_esp, "thread_info", "previous_esp"); STRUCT_SIZE_INIT(thread_info, "thread_info"); tt->flags |= THREAD_INFO; } MEMBER_OFFSET_INIT(task_struct_state, "task_struct", "state"); MEMBER_OFFSET_INIT(task_struct_exit_state, "task_struct", "exit_state"); MEMBER_OFFSET_INIT(task_struct_pid, "task_struct", "pid"); MEMBER_OFFSET_INIT(task_struct_comm, "task_struct", "comm"); MEMBER_OFFSET_INIT(task_struct_next_task, "task_struct", "next_task"); MEMBER_OFFSET_INIT(task_struct_processor, "task_struct", "processor"); MEMBER_OFFSET_INIT(task_struct_p_pptr, "task_struct", "p_pptr"); MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "parent"); if (INVALID_MEMBER(task_struct_parent)) MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "real_parent"); MEMBER_OFFSET_INIT(task_struct_has_cpu, "task_struct", "has_cpu"); MEMBER_OFFSET_INIT(task_struct_cpus_runnable, "task_struct", "cpus_runnable"); MEMBER_OFFSET_INIT(task_struct_cpu, "task_struct", "cpu"); MEMBER_OFFSET_INIT(task_struct_active_mm, "task_struct", "active_mm"); MEMBER_OFFSET_INIT(task_struct_next_run, "task_struct", "next_run"); MEMBER_OFFSET_INIT(task_struct_flags, "task_struct", "flags"); MEMBER_OFFSET_INIT(task_struct_pidhash_next, "task_struct", "pidhash_next"); 
MEMBER_OFFSET_INIT(task_struct_pgrp, "task_struct", "pgrp"); MEMBER_OFFSET_INIT(task_struct_tgid, "task_struct", "tgid"); MEMBER_OFFSET_INIT(task_struct_pids, "task_struct", "pids"); MEMBER_OFFSET_INIT(task_struct_last_run, "task_struct", "last_run"); MEMBER_OFFSET_INIT(task_struct_timestamp, "task_struct", "timestamp"); MEMBER_OFFSET_INIT(task_struct_sched_info, "task_struct", "sched_info"); if (VALID_MEMBER(task_struct_sched_info)) MEMBER_OFFSET_INIT(sched_info_last_arrival, "sched_info", "last_arrival"); if (VALID_MEMBER(task_struct_last_run) || VALID_MEMBER(task_struct_timestamp) || VALID_MEMBER(sched_info_last_arrival)) { char buf[BUFSIZE]; strcpy(buf, "alias last ps -l"); alias_init(buf); } MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid"); MEMBER_OFFSET_INIT(pid_hash_chain, "pid", "hash_chain"); STRUCT_SIZE_INIT(pid_link, "pid_link"); STRUCT_SIZE_INIT(upid, "upid"); if (VALID_STRUCT(upid)) { MEMBER_OFFSET_INIT(upid_nr, "upid", "nr"); MEMBER_OFFSET_INIT(upid_ns, "upid", "ns"); MEMBER_OFFSET_INIT(upid_pid_chain, "upid", "pid_chain"); MEMBER_OFFSET_INIT(pid_numbers, "pid", "numbers"); MEMBER_OFFSET_INIT(pid_tasks, "pid", "tasks"); tt->init_pid_ns = symbol_value("init_pid_ns"); } MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain"); STRUCT_SIZE_INIT(task_struct, "task_struct"); if (kernel_symbol_exists("arch_task_struct_size") && readmem(symbol_value("arch_task_struct_size"), KVADDR, &task_struct_size, sizeof(int), "arch_task_struct_size", RETURN_ON_ERROR)) { ASSIGN_SIZE(task_struct) = task_struct_size; if (STRUCT_SIZE("task_struct") != SIZE(task_struct)) add_to_downsized("task_struct"); if (CRASHDEBUG(1)) fprintf(fp, "downsize task_struct: %ld to %ld\n", STRUCT_SIZE("task_struct"), SIZE(task_struct)); } MEMBER_OFFSET_INIT(task_struct_sig, "task_struct", "sig"); MEMBER_OFFSET_INIT(task_struct_signal, "task_struct", "signal"); MEMBER_OFFSET_INIT(task_struct_blocked, "task_struct", "blocked"); MEMBER_OFFSET_INIT(task_struct_sigpending, "task_struct", 
"sigpending"); MEMBER_OFFSET_INIT(task_struct_pending, "task_struct", "pending"); MEMBER_OFFSET_INIT(task_struct_sigqueue, "task_struct", "sigqueue"); MEMBER_OFFSET_INIT(task_struct_sighand, "task_struct", "sighand"); MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count"); MEMBER_OFFSET_INIT(signal_struct_nr_threads, "signal_struct", "nr_threads"); MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action"); MEMBER_OFFSET_INIT(signal_struct_shared_pending, "signal_struct", "shared_pending"); MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa"); MEMBER_OFFSET_INIT(sigaction_sa_handler, "sigaction", "sa_handler"); MEMBER_OFFSET_INIT(sigaction_sa_mask, "sigaction", "sa_mask"); MEMBER_OFFSET_INIT(sigaction_sa_flags, "sigaction", "sa_flags"); MEMBER_OFFSET_INIT(sigpending_head, "sigpending", "head"); if (INVALID_MEMBER(sigpending_head)) MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list"); MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal"); MEMBER_SIZE_INIT(sigpending_signal, "sigpending", "signal"); STRUCT_SIZE_INIT(sigqueue, "sigqueue"); STRUCT_SIZE_INIT(signal_queue, "signal_queue"); STRUCT_SIZE_INIT(sighand_struct, "sighand_struct"); if (VALID_STRUCT(sighand_struct)) MEMBER_OFFSET_INIT(sighand_struct_action, "sighand_struct", "action"); MEMBER_OFFSET_INIT(siginfo_si_signo, "siginfo", "si_signo"); STRUCT_SIZE_INIT(signal_struct, "signal_struct"); STRUCT_SIZE_INIT(k_sigaction, "k_sigaction"); MEMBER_OFFSET_INIT(task_struct_start_time, "task_struct", "start_time"); MEMBER_SIZE_INIT(task_struct_start_time, "task_struct", "start_time"); MEMBER_SIZE_INIT(task_struct_utime, "task_struct", "utime"); MEMBER_SIZE_INIT(task_struct_stime, "task_struct", "stime"); MEMBER_OFFSET_INIT(task_struct_times, "task_struct", "times"); MEMBER_OFFSET_INIT(tms_tms_utime, "tms", "tms_utime"); MEMBER_OFFSET_INIT(tms_tms_stime, "tms", "tms_stime"); MEMBER_OFFSET_INIT(task_struct_utime, "task_struct", "utime"); MEMBER_OFFSET_INIT(task_struct_stime, 
"task_struct", "stime"); STRUCT_SIZE_INIT(cputime_t, "cputime_t"); if (symbol_exists("cfq_slice_async")) { uint cfq_slice_async; get_symbol_data("cfq_slice_async", sizeof(int), &cfq_slice_async); if (cfq_slice_async) { machdep->hz = cfq_slice_async * 25; if (CRASHDEBUG(2)) fprintf(fp, "cfq_slice_async exists: setting hz to %d\n", machdep->hz); } } if (VALID_MEMBER(runqueue_arrays)) MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", "run_list"); MEMBER_OFFSET_INIT(task_struct_rss_stat, "task_struct", "rss_stat"); MEMBER_OFFSET_INIT(task_rss_stat_count, "task_rss_stat", "count"); if ((tt->task_struct = (char *)malloc(SIZE(task_struct))) == NULL) error(FATAL, "cannot malloc task_struct space."); if ((tt->mm_struct = (char *)malloc(SIZE(mm_struct))) == NULL) error(FATAL, "cannot malloc mm_struct space."); if ((tt->flags & THREAD_INFO) && ((tt->thread_info = (char *)malloc(SIZE(thread_info))) == NULL)) error(FATAL, "cannot malloc thread_info space."); STRUCT_SIZE_INIT(task_union, "task_union"); STRUCT_SIZE_INIT(thread_union, "thread_union"); if (VALID_SIZE(task_union) && (SIZE(task_union) != STACKSIZE())) { error(WARNING, "\nnon-standard stack size: %ld\n", len = SIZE(task_union)); machdep->stacksize = len; } else if (VALID_SIZE(thread_union) && ((len = SIZE(thread_union)) != STACKSIZE())) machdep->stacksize = len; if (symbol_exists("pidhash") && symbol_exists("pid_hash") && !symbol_exists("pidhash_shift")) error(FATAL, "pidhash and pid_hash both exist -- cannot distinquish between them\n"); if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) { int pidhash_shift; if (get_symbol_type("PIDTYPE_PID", NULL, &req) != TYPE_CODE_ENUM) error(FATAL, "cannot determine PIDTYPE_PID pid_hash dimension\n"); get_symbol_data("pidhash_shift", sizeof(int), &pidhash_shift); tt->pidhash_len = 1 << pidhash_shift; get_symbol_data("pid_hash", sizeof(ulong), &tt->pidhash_addr); if (VALID_MEMBER(pid_link_pid) && VALID_MEMBER(pid_hash_chain)) { get_symbol_data("pid_hash", 
sizeof(ulong), &tt->pidhash_addr); tt->refresh_task_table = refresh_pid_hash_task_table; } else { tt->pidhash_addr = symbol_value("pid_hash"); if (LKCD_KERNTYPES()) { if (VALID_STRUCT(pid_link)) { if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) tt->refresh_task_table = refresh_hlist_task_table_v3; else tt->refresh_task_table = refresh_hlist_task_table_v2; } else tt->refresh_task_table = refresh_hlist_task_table; builtin_array_length("pid_hash", tt->pidhash_len, NULL); } else { if (!get_array_length("pid_hash", NULL, sizeof(void *)) && VALID_STRUCT(pid_link)) { if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) tt->refresh_task_table = refresh_hlist_task_table_v3; else tt->refresh_task_table = refresh_hlist_task_table_v2; } else tt->refresh_task_table = refresh_hlist_task_table; } } tt->flags |= PID_HASH; } else if (symbol_exists("pid_hash")) { if (get_symbol_type("PIDTYPE_PGID", NULL, &req) != TYPE_CODE_ENUM) error(FATAL, "cannot determine PIDTYPE_PID pid_hash dimension\n"); if (!(tt->pidhash_len = get_array_length("pid_hash", &dim, SIZE(list_head)))) error(FATAL, "cannot determine pid_hash array dimensions\n"); tt->pidhash_addr = symbol_value("pid_hash"); tt->refresh_task_table = refresh_pid_hash_task_table; tt->flags |= PID_HASH; } else if (symbol_exists("pidhash")) { tt->pidhash_addr = symbol_value("pidhash"); tt->pidhash_len = get_array_length("pidhash", NULL, 0); if (tt->pidhash_len == 0) { if (!(nsp = next_symbol("pidhash", NULL))) error(FATAL, "cannot determine pidhash length\n"); tt->pidhash_len = (nsp->value-tt->pidhash_addr) / sizeof(void *); } if (ACTIVE()) tt->refresh_task_table = refresh_pidhash_task_table; tt->flags |= PIDHASH; } /* * Get the IRQ stacks info if it's configured. 
*/ if (VALID_STRUCT(irq_ctx)) irqstacks_init(); get_active_set(); if (tt->flags & ACTIVE_ONLY) tt->refresh_task_table = refresh_active_task_table; tt->refresh_task_table(); if (tt->flags & TASK_REFRESH_OFF) tt->flags &= ~(TASK_REFRESH|TASK_REFRESH_OFF); if (ACTIVE()) { active_pid = REMOTE() ? pc->server_pid : pc->program_pid; set_context(NO_TASK, active_pid); tt->this_task = pid_to_task(active_pid); } else { if (KDUMP_DUMPFILE()) map_cpus_to_prstatus(); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) map_cpus_to_prstatus_kdump_cmprs(); please_wait("determining panic task"); set_context(get_panic_context(), NO_PID); please_wait_done(); } sort_context_array(); sort_tgid_array(); if (pc->flags & SILENT) initialize_task_state(); tt->flags |= TASK_INIT_DONE; } /* * Store the pointers to the hard and soft irq_ctx arrays as well as * the task pointers contained within each of them. */ static void irqstacks_init(void) { int i; char *thread_info_buf; struct syment *hard_sp, *soft_sp; if (!(tt->hardirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc hardirq_ctx space."); if (!(tt->hardirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc hardirq_tasks space."); if (!(tt->softirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc softirq_ctx space."); if (!(tt->softirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc softirq_tasks space."); thread_info_buf = GETBUF(SIZE(irq_ctx)); if ((hard_sp = per_cpu_symbol_search("per_cpu__hardirq_ctx"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { for (i = 0; i < NR_CPUS; i++) { if (!kt->__per_cpu_offset[i]) continue; tt->hardirq_ctx[i] = hard_sp->value + kt->__per_cpu_offset[i]; } } else tt->hardirq_ctx[0] = hard_sp->value; } else if (symbol_exists("hardirq_ctx")) { i = get_array_length("hardirq_ctx", NULL, 0); get_symbol_data("hardirq_ctx", sizeof(long)*(i <= NR_CPUS ? 
i : NR_CPUS), &tt->hardirq_ctx[0]); } else error(WARNING, "cannot determine hardirq_ctx addresses\n"); for (i = 0; i < NR_CPUS; i++) { if (!(tt->hardirq_ctx[i])) continue; if (!readmem(tt->hardirq_ctx[i], KVADDR, thread_info_buf, SIZE(irq_ctx), "hardirq thread_union", RETURN_ON_ERROR)) { error(INFO, "cannot read hardirq_ctx[%d] at %lx\n", i, tt->hardirq_ctx[i]); continue; } tt->hardirq_tasks[i] = ULONG(thread_info_buf+OFFSET(thread_info_task)); } if ((soft_sp = per_cpu_symbol_search("per_cpu__softirq_ctx"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { for (i = 0; i < NR_CPUS; i++) { if (!kt->__per_cpu_offset[i]) continue; tt->softirq_ctx[i] = soft_sp->value + kt->__per_cpu_offset[i]; } } else tt->softirq_ctx[0] = soft_sp->value; } else if (symbol_exists("softirq_ctx")) { i = get_array_length("softirq_ctx", NULL, 0); get_symbol_data("softirq_ctx", sizeof(long)*(i <= NR_CPUS ? i : NR_CPUS), &tt->softirq_ctx[0]); } else error(WARNING, "cannot determine softirq_ctx addresses\n"); for (i = 0; i < NR_CPUS; i++) { if (!(tt->softirq_ctx[i])) continue; if (!readmem(tt->softirq_ctx[i], KVADDR, thread_info_buf, SIZE(irq_ctx), "softirq thread_union", RETURN_ON_ERROR)) { error(INFO, "cannot read softirq_ctx[%d] at %lx\n", i, tt->hardirq_ctx[i]); continue; } tt->softirq_tasks[i] = ULONG(thread_info_buf+OFFSET(thread_info_task)); } tt->flags |= IRQSTACKS; FREEBUF(thread_info_buf); } int in_irq_ctx(ulonglong type, int cpu, ulong addr) { if (!(tt->flags & IRQSTACKS)) return FALSE; switch (type) { case BT_SOFTIRQ: if (tt->softirq_ctx[cpu] && (addr >= tt->softirq_ctx[cpu]) && (addr < (tt->softirq_ctx[cpu] + STACKSIZE()))) return TRUE; break; case BT_HARDIRQ: if (tt->hardirq_ctx[cpu] && (addr >= tt->hardirq_ctx[cpu]) && (addr < (tt->hardirq_ctx[cpu] + STACKSIZE()))) return TRUE; break; } return FALSE; } /* * Allocate or re-allocated space for the task_context array and task list. 
*/
static void
allocate_task_space(int cnt)
{
	/*
	 * First call: allocate the three parallel per-task arrays
	 * (raw task addresses, contexts, and tgid entries).
	 */
	if (tt->context_array == NULL) {
		if (!(tt->task_local = (void *)
		    malloc(cnt * sizeof(void *))))
			error(FATAL,
			    "cannot malloc kernel task array (%d tasks)", cnt);

		if (!(tt->context_array = (struct task_context *)
		    malloc(cnt * sizeof(struct task_context))))
			error(FATAL,
			    "cannot malloc context array (%d tasks)", cnt);

		if (!(tt->tgid_array = (struct tgid_context *)
		    malloc(cnt * sizeof(struct tgid_context))))
			error(FATAL,
			    "cannot malloc tgid array (%d tasks)", cnt);
	} else {
		/*
		 * Re-allocation path (live system task count grew).
		 * Overwriting the pointers directly is safe here because
		 * a realloc failure aborts via error(FATAL, ...).
		 */
		if (!(tt->task_local = (void *)
		    realloc(tt->task_local, cnt * sizeof(void *))))
			error(FATAL,
			    "%scannot realloc kernel task array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);

		if (!(tt->context_array = (struct task_context *)
		    realloc(tt->context_array,
		    cnt * sizeof(struct task_context))))
			error(FATAL,
			    "%scannot realloc context array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);

		if (!(tt->tgid_array = (struct tgid_context *)
		    realloc(tt->tgid_array,
		    cnt * sizeof(struct tgid_context))))
			error(FATAL,
			    "%scannot realloc tgid array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);
	}
}

/*
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel task array looking for active tasks, and
 * populates the local task table with their essential data.
 */
static void
refresh_fixed_task_table(void)
{
	int i;
	ulong *tlp;
	struct task_context *tc;
	ulong curtask;
	ulong retries;
	ulong curpid;
	char *tp;

/*
 * A task[] slot appears to be free if it is NULL or points back into
 * the task array region itself -- NOTE(review): inferred from the
 * task_start/task_end bounds test below; confirm against the kernel's
 * fixed task[] semantics.
 */
#define TASK_FREE(x)   ((x == 0) || (((ulong)(x) >= tt->task_start) && \
			((ulong)(x) < tt->task_end)))
#define TASK_IN_USE(x) (!TASK_FREE(x))

	/* Dumpfile task data is static -- gather it only once. */
	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))
		return;

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "%splease wait... (gathering task table data)",
			GDB_PATCHED() ?
		"" : "\n");
		fflush(fp);
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	/* Live system with refresh disabled: keep the existing table. */
	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
	 */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
		curtask = CURRENT_TASK();
		curpid = CURRENT_PID();
	}

	retries = 0;
retry:
	if (!readmem(tt->task_start, KVADDR, tt->task_local,
	    tt->max_tasks * sizeof(void *), "kernel task array",
	    RETURN_ON_ERROR))
		error(FATAL, "cannot read kernel task array");

	clear_task_cache();

	/*
	 * Store a context for every in-use slot; on a live system a task
	 * that vanished mid-walk forces a full retry, on a dumpfile it is
	 * simply skipped.
	 */
	for (i = 0, tlp = (ulong *)tt->task_local,
	     tt->running_tasks = 0, tc = tt->context_array;
	     i < tt->max_tasks; i++, tlp++) {
		if (TASK_IN_USE(*tlp)) {
			if (!(tp = fill_task_struct(*tlp))) {
				if (DUMPFILE())
					continue;
				retries++;
				goto retry;
			}

			if (store_context(tc, *tlp, tp)) {
				tc++;
				tt->running_tasks++;
			}
		}
	}

	if (DUMPFILE()) {
		/* Erase the "please wait" message. */
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\r \r");
		fflush(fp);
	}

	/* Re-establish the current context, which may have moved. */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
		refresh_context(curtask, curpid);

	tt->retries = MAX(tt->retries, retries);
}

/*
 * Verify that a task_context's data makes sense enough to include
 * in the task_context array.
 */
static int
verify_task(struct task_context *tc, int level)
{
	int i;
	ulong next_task;
	ulong readflag;

	/* On live systems the task may legitimately have exited: be quiet. */
	readflag = ACTIVE() ?
	    (RETURN_ON_ERROR|QUIET) : (RETURN_ON_ERROR);

	/*
	 * Level 1 performs the stricter checks (readable next_task link,
	 * sane processor bits) and then falls through to the level 2
	 * checks (valid parent task, processor within NR_CPUS).
	 */
	switch (level)
	{
	case 1:
		if (!readmem(tc->task + OFFSET(task_struct_next_task),
		    KVADDR, &next_task, sizeof(void *), "next_task",
		    readflag)) {
			return FALSE;
		}
		if (!IS_TASK_ADDR(next_task))
			return FALSE;
		if (tc->processor & ~NO_PROC_ID)
			return FALSE;
		/* fall through */
	case 2:
		if (!IS_TASK_ADDR(tc->ptask))
			return FALSE;
		if ((tc->processor < 0) || (tc->processor >= NR_CPUS)) {
			/*
			 * A corrupt cpu value can be repaired if the task
			 * is known to be active on some cpu.
			 */
			for (i = 0; i < NR_CPUS; i++) {
				if (tc->task == tt->active_set[i]) {
					error(WARNING,
					    "active task %lx on cpu %d: corrupt cpu value: %u\n\n",
					    tc->task, i, tc->processor);
					tc->processor = i;
					return TRUE;
				}
			}
			if (CRASHDEBUG(1))
				error(INFO,
				    "verify_task: task: %lx invalid processor: %u",
				    tc->task, tc->processor);
			return FALSE;
		}
		break;
	}

	return TRUE;
}

/*
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel task array looking for active tasks, and
 * populates the local task table with their essential data.
 */
#define MAX_UNLIMITED_TASK_RETRIES (500)

void
refresh_unlimited_task_table(void)
{
	int i;
	ulong *tlp;
	struct task_context *tc;
	ulong curtask;
	ulong curpid;
	struct list_data list_data, *ld;
	ulong init_tasks[NR_CPUS];
	ulong retries;
	char *tp;
	int cnt;

	/* Dumpfile task data is static -- gather it only once. */
	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))
		return;

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "%splease wait... (gathering task table data)",
			GDB_PATCHED() ? "" : "\n");
		fflush(fp);
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;
	tp = NULL;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
*/ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } retries = 0; retry: if (retries && DUMPFILE()) { if (tt->flags & PIDHASH) { error(WARNING, "\ncannot gather a stable task list -- trying pidhash\n"); refresh_pidhash_task_table(); return; } error(FATAL, "\ncannot gather a stable task list\n"); } if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "cannot gather a stable task list\n"); /* * Populate the task_local array with a quick walk-through. * If there's not enough room in the local array, realloc() it. */ ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= RETURN_ON_LIST_ERROR; ld->start = symbol_value("init_task_union"); ld->member_offset = OFFSET(task_struct_next_task); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(tp); return; } if ((cnt = do_list(ld)) < 0) { retries++; goto retry; } if ((cnt+NR_CPUS+1) > tt->max_tasks) { tt->max_tasks = cnt + NR_CPUS + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); /* * If SMP, add in the other idle tasks. */ if (kt->flags & SMP) { /* * Now get the rest of the init_task[] entries, starting * at offset 1 since we've got the init_task already. 
*/ BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); get_idle_threads(&init_tasks[0], kt->cpus); tlp = (ulong *)tt->task_local; tlp += cnt; for (i = 1; i < kt->cpus; i++) { if (init_tasks[i]) { *tlp = init_tasks[i]; tlp++; } } } clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(INFO, "\ninvalid task address in task list: %lx\n", *tlp); retries++; goto retry; } if (task_exists(*tlp)) { error(INFO, "\nduplicate task address in task list: %lx\n", *tlp); retries++; goto retry; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } if (DUMPFILE()) { fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : "\r \r"); fflush(fp); } if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * This routine runs one time on dumpfiles, and constantly on live systems. * It walks through the kernel pidhash array looking for active tasks, and * populates the local task table with their essential data. * * The following manner of refreshing the task table can be used for all * kernels that have a pidhash[] array, whether or not they still * have a fixed task[] array or an unlimited list. */ static void refresh_pidhash_task_table(void) { int i; char *pidhash, *tp; ulong *pp, next, pnext; int len, cnt; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : "\rplease wait... 
(gathering task table data)"); fflush(fp); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } len = tt->pidhash_len; pidhash = GETBUF(len * sizeof(ulong)); retries = 0; retry_pidhash: if (retries && DUMPFILE()) error(FATAL,"\ncannot gather a stable task list via pidhash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pidhash (%d retries)\n", retries); if (!readmem(tt->pidhash_addr, KVADDR, pidhash, len * sizeof(ulong), "pidhash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pidhash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pidhash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } /* * Then dump the pidhash contents. */ for (i = 0, pp = (ulong *)pidhash; i < len; i++, pp++) { if (!(*pp) || !IS_KVADDR(*pp)) continue; /* * Mininum verification here -- make sure that a task address * and its pidhash_next entry (if any) both appear to be * properly aligned before accepting the task. */ next = *pp; while (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pidhash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pidhash; } if (!readmem(next + OFFSET(task_struct_pidhash_next), KVADDR, &pnext, sizeof(void *), "pidhash_next entry", QUIET|RETURN_ON_ERROR)) { error(INFO, "%scannot read from task: %lx\n", DUMPFILE() ? 
"\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pidhash; } if (!hq_enter(next)) { error(INFO, "%sduplicate task in pidhash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pidhash; } next = pnext; cnt++; } } if ((cnt+1) > tt->max_tasks) { tt->max_tasks = cnt + NR_CPUS + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pidhash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pidhash; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pidhash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pidhash; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } FREEBUF(pidhash); if (DUMPFILE()) { fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" : "\r \r"); fflush(fp); } if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * The following manner of refreshing the task table is used for all * kernels that have a pid_hash[][] array. * * This routine runs one time on dumpfiles, and constantly on live systems. * It walks through the kernel pid_hash[PIDTYPE_PID] array looking for active * tasks, and populates the local task table with their essential data. 
*/ #define HASH_TO_TASK(X) ((ulong)(X) - (OFFSET(task_struct_pids) + \ OFFSET(pid_link_pid) + OFFSET(pid_hash_chain))) #define TASK_TO_HASH(X) ((ulong)(X) + (OFFSET(task_struct_pids) + \ OFFSET(pid_link_pid) + OFFSET(pid_hash_chain))) static void refresh_pid_hash_task_table(void) { int i; struct kernel_list_head *pid_hash, *pp, *kpp; char *tp; ulong next, pnext; int len, cnt; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } len = tt->pidhash_len; pid_hash = (struct kernel_list_head *)GETBUF(len * SIZE(list_head)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(tt->pidhash_addr, KVADDR, pid_hash, len * SIZE(list_head), "pid_hash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? 
"\n" : ""); } for (i = 0; i < len; i++) { pp = &pid_hash[i]; kpp = (struct kernel_list_head *)(tt->pidhash_addr + i * SIZE(list_head)); if (pp->next == kpp) continue; if (CRASHDEBUG(7)) console("%lx: pid_hash[%d]: %lx (%lx) %lx (%lx)\n", kpp, i, pp->next, HASH_TO_TASK(pp->next), pp->prev, HASH_TO_TASK(pp->prev)); next = (ulong)HASH_TO_TASK(pp->next); while (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!readmem(TASK_TO_HASH(next), KVADDR, &pnext, sizeof(void *), "pid_hash entry", QUIET|RETURN_ON_ERROR)) { error(INFO, "%scannot read from task: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%sduplicate task in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } cnt++; if (pnext == (ulong)kpp) break; next = HASH_TO_TASK(pnext); } } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } FREEBUF(pid_hash); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * Adapt to yet another scheme, using later 2.6 hlist_head and hlist_nodes. */ #define HLIST_TO_TASK(X) ((ulong)(X) - (OFFSET(task_struct_pids) + \ OFFSET(pid_pid_chain))) static void refresh_hlist_task_table(void) { int i; ulong *pid_hash; struct syment *sp; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; char *nodebuf; int plen, len, cnt; long value; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) { /* * Workaround for gcc omitting debuginfo data for pid_hash. 
*/ if (enumerator_value("PIDTYPE_MAX", &value)) { if ((sp = next_symbol("pid_hash", NULL)) && (((sp->value - tt->pidhash_addr) / sizeof(void *)) < value)) error(WARNING, "possible pid_hash array mis-handling\n"); plen = (int)value; } else { error(WARNING, "cannot determine pid_hash array dimensions\n"); plen = 1; } } pid_hash = (ulong *)GETBUF(plen * sizeof(void *)); if (!readmem(tt->pidhash_addr, KVADDR, pid_hash, plen * SIZE(hlist_head), "pid_hash[] contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (CRASHDEBUG(7)) for (i = 0; i < plen; i++) console("pid_hash[%d]: %lx\n", i, pid_hash[i]); /* * The zero'th (PIDTYPE_PID) entry is the hlist_head array * that we want. */ if (CRASHDEBUG(1)) { if (!enumerator_value("PIDTYPE_PID", &value)) error(WARNING, "possible pid_hash array mis-handling: PIDTYPE_PID: (unknown)\n"); else if (value != 0) error(WARNING, "possible pid_hash array mis-handling: PIDTYPE_PID: %d \n", value); } pidhash_array = pid_hash[0]; FREEBUF(pid_hash); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(hlist_node)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash[0] contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash[0] array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? 
"\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; if (!readmem(pid_hash[i], KVADDR, nodebuf, SIZE(hlist_node), "pid_hash node", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid_hash node\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } kpp = pid_hash[i]; next = (ulong)HLIST_TO_TASK(kpp); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", i, pid_hash[i], next, kpp, pnext, pprev); while (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%sduplicate task in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } cnt++; if (!pnext) break; if (!readmem((ulonglong)pnext, KVADDR, nodebuf, SIZE(hlist_node), "task hlist_node", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read hlist_node from task\n"); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } kpp = (ulong)pnext; next = (ulong)HLIST_TO_TASK(kpp); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", (ulong)HLIST_TO_TASK(kpp), kpp, pnext, pprev); } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { 
error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * 2.6.17 replaced: * static struct hlist_head *pid_hash[PIDTYPE_MAX]; * with * static struct hlist_head *pid_hash; */ static void refresh_hlist_task_table_v2(void) { int i; ulong *pid_hash; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; char *nodebuf; int len, cnt; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. 
*/ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(pid_link)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; if (!readmem(pid_hash[i], KVADDR, nodebuf, SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid_hash node pid_link\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } kpp = pid_hash[i]; next = ULONG(nodebuf + OFFSET(pid_link_pid)); if (next) next -= OFFSET(task_struct_pids); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", i, pid_hash[i], next, kpp, pnext, pprev); while (1) { if (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pid_hash: %lx\n", DUMPFILE() ? 
"\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%sduplicate task in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } } cnt++; if (!pnext) break; if (!readmem((ulonglong)pnext, KVADDR, nodebuf, SIZE(pid_link), "task hlist_node pid_link", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read hlist_node pid_link from node next\n"); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } kpp = (ulong)pnext; next = ULONG(nodebuf + OFFSET(pid_link_pid)); if (next) next -= OFFSET(task_struct_pids); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", next, kpp, pnext, pprev); } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * 2.6.24: The pid_hash[] hlist_head entries were changed to point * to the hlist_node structure embedded in a upid structure. */ static void refresh_hlist_task_table_v3(void) { int i; ulong *pid_hash; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; ulong upid; char *nodebuf; int len, cnt; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; uint upid_nr; ulong upid_ns; int chained; ulong pid; ulong pid_tasks_0; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. 
*/ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(upid)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (!tt->idle_threads[i]) continue; if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; kpp = pid_hash[i]; upid = pid_hash[i] - OFFSET(upid_pid_chain); chained = 0; do_chained: if (!readmem(upid, KVADDR, nodebuf, SIZE(upid), "pid_hash upid", RETURN_ON_ERROR|QUIET)) { error(INFO, "\npid_hash[%d]: cannot read pid_hash upid\n", i); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } pnext = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_pprev)); upid_nr = UINT(nodebuf + OFFSET(upid_nr)); upid_ns = ULONG(nodebuf + OFFSET(upid_ns)); /* * Use init_pid_ns level 0 (PIDTYPE_PID). */ if (upid_ns != tt->init_pid_ns) { if (!accessible(upid_ns)) { error(INFO, "%spid_hash[%d]: invalid upid.ns: %lx\n", DUMPFILE() ? 
"\n" : "", i, upid_ns); continue; } goto chain_next; } pid = upid - OFFSET(pid_numbers); if (!readmem(pid + OFFSET(pid_tasks), KVADDR, &pid_tasks_0, sizeof(void *), "pid tasks", RETURN_ON_ERROR|QUIET)) { error(INFO, "\npid_hash[%d]: cannot read pid.tasks[0]\n", i); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } if (pid_tasks_0 == 0) goto chain_next; next = pid_tasks_0 - OFFSET(task_struct_pids); if (CRASHDEBUG(1)) { if (chained) console(" %lx upid: %lx nr: %d pid: %lx\n" " pnext/pprev: %.*lx/%lx task: %lx\n", kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); else console("pid_hash[%4d]: %lx upid: %lx nr: %d pid: %lx\n" " pnext/pprev: %.*lx/%lx task: %lx\n", i, kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); } if (!IS_TASK_ADDR(next)) { error(INFO, "%spid_hash[%d]: invalid task address: %lx\n", DUMPFILE() ? "\n" : "", i, next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%spid_hash[%d]: duplicate task: %lx\n", DUMPFILE() ? "\n" : "", i, next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } cnt++; chain_next: if (pnext) { if (chained >= tt->max_tasks) { error(INFO, "%spid_hash[%d]: corrupt/invalid upid chain\n", DUMPFILE() ? "\n" : "", i); continue; } kpp = pnext; upid = pnext - OFFSET(upid_pid_chain); chained++; goto do_chained; } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } static void refresh_active_task_table(void) { int i; char *tp; int cnt; struct task_context *tc; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curtask = NO_TASK; curpid = NO_PID; retries = 0; get_active_set(); /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } retry_active: if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); return; } /* * Get the active tasks. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->active_set[i])) cnt++; else error(WARNING, "%sduplicate active tasks?\n", DUMPFILE() ? 
"\n" : ""); } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0, tc = tt->context_array; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_active; } if (task_exists(*tlp)) { error(WARNING, "%sduplicate task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_active; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_active; } if (store_context(tc, *tlp, tp)) { tc++; tt->running_tasks++; } else if (DUMPFILE()) error(WARNING, "corrupt/invalid active task: %lx\n", *tlp); } if (!tt->running_tasks) { if (DUMPFILE()) error(FATAL, "cannot determine any active tasks!\n"); retries++; goto retry_active; } please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * Fill a task_context structure with the data from a task. If a NULL * task_context pointer is passed in, use the next available one. 
*/ static struct task_context * store_context(struct task_context *tc, ulong task, char *tp) { pid_t *pid_addr, *tgid_addr; char *comm_addr; int *processor_addr; ulong *parent_addr; ulong *mm_addr; int has_cpu; int do_verify; struct tgid_context *tg; processor_addr = NULL; if (tt->refresh_task_table == refresh_fixed_task_table) do_verify = 1; else if (tt->refresh_task_table == refresh_pid_hash_task_table) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table_v2) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table_v3) do_verify = 2; else if (tt->refresh_task_table == refresh_active_task_table) do_verify = 2; else do_verify = 0; if (!tc) tc = tt->context_array + tt->running_tasks; pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid)); tgid_addr = (pid_t *)(tp + OFFSET(task_struct_tgid)); comm_addr = (char *)(tp + OFFSET(task_struct_comm)); if (tt->flags & THREAD_INFO) { tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info)); fill_thread_info(tc->thread_info); processor_addr = (int *) (tt->thread_info + OFFSET(thread_info_cpu)); } else if (VALID_MEMBER(task_struct_processor)) processor_addr = (int *) (tp + OFFSET(task_struct_processor)); else if (VALID_MEMBER(task_struct_cpu)) processor_addr = (int *) (tp + OFFSET(task_struct_cpu)); if (VALID_MEMBER(task_struct_p_pptr)) parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr)); else parent_addr = (ulong *)(tp + OFFSET(task_struct_parent)); mm_addr = (ulong *)(tp + OFFSET(task_struct_mm)); has_cpu = task_has_cpu(task, tp); tc->pid = (ulong)(*pid_addr); strlcpy(tc->comm, comm_addr, TASK_COMM_LEN); tc->processor = *processor_addr; tc->ptask = *parent_addr; tc->mm_struct = *mm_addr; tc->task = task; tc->tc_next = NULL; /* * Fill a tgid_context structure with the data from * the incoming task. 
*/ tg = tt->tgid_array + tt->running_tasks; tg->tgid = *tgid_addr; tg->task = task; if (do_verify && !verify_task(tc, do_verify)) { error(INFO, "invalid task address: %lx\n", tc->task); BZERO(tc, sizeof(struct task_context)); return NULL; } if (has_cpu && (tt->flags & POPULATE_PANIC)) tt->panic_threads[tc->processor] = tc->task; return tc; } /* * The current context may have moved to a new spot in the task table * or have exited since the last command. If it still exists, reset its * new position. If it doesn't exist, set the context back to the initial * crash context. If necessary, complain and show the restored context. */ static void refresh_context(ulong curtask, ulong curpid) { ulong value, complain; struct task_context *tc; if (task_exists(curtask) && pid_exists(curpid)) { set_context(curtask, NO_PID); } else { set_context(tt->this_task, NO_PID); complain = TRUE; if (STREQ(args[0], "set") && (argcnt == 2) && IS_A_NUMBER(args[1])) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: case STR_TASK: complain = FALSE; break; case STR_INVALID: complain = TRUE; break; } } if (complain) { error(INFO, "current context no longer exists -- " "restoring \"%s\" context:\n\n", pc->program_name); show_context(CURRENT_CONTEXT()); fprintf(fp, "\n"); } } } /* * Sort the task_context array by PID number; for PID 0, sort by processor. */ void sort_context_array(void) { ulong curtask; curtask = CURRENT_TASK(); qsort((void *)tt->context_array, (size_t)tt->running_tasks, sizeof(struct task_context), sort_by_pid); set_context(curtask, NO_PID); } static int sort_by_pid(const void *arg1, const void *arg2) { struct task_context *t1, *t2; t1 = (struct task_context *)arg1; t2 = (struct task_context *)arg2; if ((t1->pid == 0) && (t2->pid == 0)) return (t1->processor < t2->processor ? -1 : t1->processor == t2->processor ? 0 : 1); else return (t1->pid < t2->pid ? -1 : t1->pid == t2->pid ? 
0 : 1); } static int sort_by_last_run(const void *arg1, const void *arg2) { ulong task_last_run_stamp(ulong); struct task_context *t1, *t2; ulonglong lr1, lr2; t1 = (struct task_context *)arg1; t2 = (struct task_context *)arg2; lr1 = task_last_run(t1->task); lr2 = task_last_run(t2->task); return (lr2 < lr1 ? -1 : lr2 == lr1 ? 0 : 1); } static void sort_context_array_by_last_run(void) { ulong curtask; curtask = CURRENT_TASK(); qsort((void *)tt->context_array, (size_t)tt->running_tasks, sizeof(struct task_context), sort_by_last_run); set_context(curtask, NO_PID); } /* * Set the tgid_context array by tgid number. */ void sort_tgid_array(void) { if (VALID_MEMBER(mm_struct_rss) || (!VALID_MEMBER(task_struct_rss_stat))) return; qsort((void *)tt->tgid_array, (size_t)tt->running_tasks, sizeof(struct tgid_context), sort_by_tgid); tt->last_tgid = tt->tgid_array; } int sort_by_tgid(const void *arg1, const void *arg2) { struct tgid_context *t1, *t2; t1 = (struct tgid_context *)arg1; t2 = (struct tgid_context *)arg2; return (t1->tgid < t2->tgid ? -1 : t1->tgid == t2->tgid ? 0 : 1); } /* * Keep a stash of the last task_struct accessed. Chances are it will * be hit several times before the next task is accessed. */ char * fill_task_struct(ulong task) { if (XEN_HYPER_MODE()) return NULL; if (!IS_LAST_TASK_READ(task)) { if (!readmem(task, KVADDR, tt->task_struct, SIZE(task_struct), "fill_task_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { tt->last_task_read = 0; return NULL; } } tt->last_task_read = task; return(tt->task_struct); } /* * Keep a stash of the last thread_info struct accessed. Chances are it will * be hit several times before the next task is accessed. */ char * fill_thread_info(ulong thread_info) { if (!IS_LAST_THREAD_INFO_READ(thread_info)) { if (!readmem(thread_info, KVADDR, tt->thread_info, SIZE(thread_info), "fill_thread_info", ACTIVE() ? 
(RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { tt->last_thread_info_read = 0; return NULL; } } tt->last_thread_info_read = thread_info; return(tt->thread_info); } /* * Used by back_trace(), copy the complete kernel stack into a local buffer * and fill the task_struct buffer, dealing with possible future separation * of task_struct and stack and/or cache coloring of stack top. */ void fill_stackbuf(struct bt_info *bt) { if (!bt->stackbuf) { bt->stackbuf = GETBUF(bt->stacktop - bt->stackbase); if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, "stack contents", RETURN_ON_ERROR)) error(FATAL, "read of stack at %lx failed\n", bt->stackbase); } if (XEN_HYPER_MODE()) return; if (!IS_LAST_TASK_READ(bt->task)) { if (bt->stackbase == bt->task) { BCOPY(bt->stackbuf, tt->task_struct, SIZE(task_struct)); tt->last_task_read = bt->task; } else fill_task_struct(bt->task); } } /* * Keeping the task_struct info intact, alter the contents of the already * allocated local copy of a kernel stack, for things like IRQ stacks or * non-standard eframe searches. The caller must change the stackbase * and stacktop values. */ void alter_stackbuf(struct bt_info *bt) { if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, "stack contents", RETURN_ON_ERROR)) error(FATAL, "read of stack at %lx failed\n", bt->stackbase); } /* * In the same vein as fill_task_struct(), keep a stash of the mm_struct * of a task. */ char *fill_mm_struct(ulong mm) { if (!IS_LAST_MM_READ(mm)) { if (!readmem(mm, KVADDR, tt->mm_struct, SIZE(mm_struct), "fill_mm_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { tt->last_mm_read = 0; return NULL; } } tt->last_mm_read = mm; return(tt->mm_struct); } /* * If active, clear out references to the last task and mm_struct read. 
*/ void clear_task_cache(void) { if (ACTIVE()) tt->last_task_read = tt->last_mm_read = 0; } /* * Shorthand command to dump the current context's task_struct, or if * pid or task arguments are entered, the task_structs of the targets. * References to structure members can be given to pare down the output, * which are put in a comma-separated list. */ void cmd_task(void) { int c, tcnt, bogus; unsigned int radix; ulong value; struct reference *ref; struct task_context *tc; ulong *tasklist; char *memberlist; tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong)); ref = (struct reference *)GETBUF(sizeof(struct reference)); memberlist = GETBUF(BUFSIZE); ref->str = memberlist; radix = 0; while ((c = getopt(argcnt, args, "xdhR:")) != EOF) { switch(c) { case 'h': case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; case 'R': if (strlen(ref->str)) strcat(ref->str, ","); strcat(ref->str, optarg); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); tcnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) tasklist[tcnt++] = tc->task; break; case STR_TASK: tasklist[tcnt++] = value; break; case STR_INVALID: bogus++; error(INFO, "invalid task or pid value: %s\n\n", args[optind]); break; } } else if (strstr(args[optind], ",") || MEMBER_EXISTS("task_struct", args[optind])) { if (strlen(ref->str)) strcat(ref->str, ","); strcat(ref->str, args[optind]); } else if (strstr(args[optind], ".") || strstr(args[optind], "[")) { if (strlen(ref->str)) strcat(ref->str, ","); strcat(ref->str, args[optind]); } else error(INFO, "invalid task, pid, or task_struct member: %s\n\n", args[optind]); optind++; } if (!tcnt && !bogus) tasklist[tcnt++] = CURRENT_TASK(); for (c = 0; c < tcnt; 
c++) do_task(tasklist[c], 0, strlen(ref->str) ? ref : NULL, radix); } /* * Do the work for the task command. */ void do_task(ulong task, ulong flags, struct reference *ref, unsigned int radix) { struct task_context *tc; tc = task_to_context(task); if (ref) { print_task_header(fp, tc, 0); task_struct_member(tc, radix, ref); } else { if (!(flags & FOREACH_TASK)) print_task_header(fp, tc, 0); dump_struct("task_struct", task, radix); if (tt->flags & THREAD_INFO) { fprintf(fp, "\n"); dump_struct("thread_info", tc->thread_info, radix); } } fprintf(fp, "\n"); } /* * Search the task_struct for the referenced field. */ static void task_struct_member(struct task_context *tc, unsigned int radix, struct reference *ref) { int i; int argcnt; char *arglist[MAXARGS]; char *refcopy; struct datatype_member dm; if ((count_chars(ref->str, ',')+1) > MAXARGS) { error(INFO, "too many -R arguments in comma-separated list!\n"); return; } refcopy = GETBUF(strlen(ref->str)+1); strcpy(refcopy, ref->str); replace_string(refcopy, ",", ' '); argcnt = parse_line(refcopy, arglist); open_tmpfile(); dump_struct("task_struct", tc->task, radix); if (tt->flags & THREAD_INFO) dump_struct("thread_info", tc->thread_info, radix); for (i = 0; i < argcnt; i++) { if (count_chars(arglist[i], '.') || count_chars(arglist[i], '[')) { dm.member = arglist[i]; parse_for_member_extended(&dm, 0); } else { if (!MEMBER_EXISTS("task_struct", arglist[i]) && !MEMBER_EXISTS("thread_info", arglist[i])) error(INFO, "%s: not a task_struct or " "thread_info member\n", arglist[i]); parse_task_thread(1, &arglist[i], tc); } } close_tmpfile(); FREEBUF(refcopy); } static void parse_task_thread(int argcnt, char *arglist[], struct task_context *tc) { char buf[BUFSIZE]; char lookfor1[BUFSIZE]; char lookfor2[BUFSIZE]; char lookfor3[BUFSIZE]; int i; rewind(pc->tmpfile); BZERO(lookfor1, BUFSIZE); BZERO(lookfor2, BUFSIZE); BZERO(lookfor3, BUFSIZE); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strlen(lookfor2)) { fprintf(pc->saved_fp, 
"%s", buf); if (STRNEQ(buf, lookfor2)) BZERO(lookfor2, BUFSIZE); continue; } if (strlen(lookfor3)) { fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, lookfor3)) BZERO(lookfor3, BUFSIZE); continue; } for (i = 0; i < argcnt; i++) { BZERO(lookfor1, BUFSIZE); BZERO(lookfor2, BUFSIZE); BZERO(lookfor3, BUFSIZE); sprintf(lookfor1, " %s = ", arglist[i]); if (STRNEQ(buf, lookfor1)) { fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, "{{\n")) sprintf(lookfor2, " }},"); else if (strstr(buf, "{\n")) sprintf(lookfor2, " },"); else if (strstr(buf, "{")) sprintf(lookfor3, "},"); break; } } } } static char *ps_exclusive = "-a, -t, -c, -p, -g, -l, -m, -S and -r flags are all mutually-exclusive\n"; static void check_ps_exclusive(ulong flag, ulong thisflag) { if (flag & (PS_EXCLUSIVE & ~thisflag)) error(FATAL, ps_exclusive); } /* * Display ps-like data for all tasks, or as specified by pid, task, or * command-name arguments. */ void cmd_ps(void) { int c, ac; ulong flag; ulong value; static struct psinfo psinfo; struct task_context *tc; char *cpuspec, *p; BZERO(&psinfo, sizeof(struct psinfo)); cpuspec = NULL; flag = 0; while ((c = getopt(argcnt, args, "SgstcpkuGlmarC:")) != EOF) { switch(c) { case 'k': if (flag & PS_USER) error(FATAL, "-u and -k are mutually exclusive\n"); flag |= PS_KERNEL; break; case 'u': if (flag & PS_KERNEL) error(FATAL, "-u and -k are mutually exclusive\n"); flag |= PS_USER; break; case 'G': if (flag & PS_GROUP) break; else if (hq_open()) flag |= PS_GROUP; else error(INFO, "cannot hash thread group tasks\n"); break; /* * The a, t, c, p, g, l and r flags are all mutually-exclusive. 
*/ case 'g': check_ps_exclusive(flag, PS_TGID_LIST); flag |= PS_TGID_LIST; break; case 'a': check_ps_exclusive(flag, PS_ARGV_ENVP); flag |= PS_ARGV_ENVP; break; case 't': check_ps_exclusive(flag, PS_TIMES); flag |= PS_TIMES; break; case 'c': check_ps_exclusive(flag, PS_CHILD_LIST); flag |= PS_CHILD_LIST; break; case 'p': check_ps_exclusive(flag, PS_PPID_LIST); flag |= PS_PPID_LIST; break; case 'm': if (INVALID_MEMBER(task_struct_last_run) && INVALID_MEMBER(task_struct_timestamp) && INVALID_MEMBER(sched_info_last_arrival)) { error(INFO, "last-run timestamps do not exist in this kernel\n"); argerrs++; break; } if (INVALID_MEMBER(rq_timestamp)) option_not_supported(c); check_ps_exclusive(flag, PS_MSECS); flag |= PS_MSECS; break; case 'l': if (INVALID_MEMBER(task_struct_last_run) && INVALID_MEMBER(task_struct_timestamp) && INVALID_MEMBER(sched_info_last_arrival)) { error(INFO, "last-run timestamps do not exist in this kernel\n"); argerrs++; break; } check_ps_exclusive(flag, PS_LAST_RUN); flag |= PS_LAST_RUN; break; case 's': flag |= PS_KSTACKP; break; case 'r': check_ps_exclusive(flag, PS_RLIMIT); flag |= PS_RLIMIT; break; case 'S': check_ps_exclusive(flag, PS_SUMMARY); flag |= PS_SUMMARY; break; case 'C': cpuspec = optarg; psinfo.cpus = get_cpumask_buf(); make_cpumask(cpuspec, psinfo.cpus, FAULT_ON_ERROR, NULL); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (flag & (PS_LAST_RUN|PS_MSECS)) sort_context_array_by_last_run(); else if (psinfo.cpus) { error(INFO, "-C option is only applicable with -l and -m\n"); goto bailout; } if (!args[optind]) { show_ps(PS_SHOW_ALL|flag, &psinfo); return; } if (flag & PS_SUMMARY) error(FATAL, "-S option takes no arguments\n"); if (psinfo.cpus) error(INFO, "-C option is not applicable with specified tasks\n"); ac = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: psinfo.pid[ac] = value; psinfo.task[ac] = NO_TASK; 
psinfo.type[ac] = PS_BY_PID; flag |= PS_BY_PID; break; case STR_TASK: psinfo.task[ac] = value; psinfo.pid[ac] = NO_PID; psinfo.type[ac] = PS_BY_TASK; flag |= PS_BY_TASK; break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n\n", args[optind]); break; } ac++; } else if (SINGLE_QUOTED_STRING(args[optind])) { /* * Regular expression is exclosed within "'" character. * The args[optind] string may not be modified, so a copy * is duplicated. */ if (psinfo.regexs == MAX_PS_ARGS) error(INFO, "too many expressions specified!\n"); else { p = strdup(&args[optind][1]); LASTCHAR(p) = NULLCHAR; if (regcomp(&psinfo.regex_data[psinfo.regexs].regex, p, REG_EXTENDED|REG_NOSUB)) { error(INFO, "invalid regular expression: %s\n", p); free(p); goto bailout; } psinfo.regex_data[psinfo.regexs].pattern = p; if (psinfo.regexs++ == 0) { pc->cmd_cleanup_arg = (void *)&psinfo; pc->cmd_cleanup = ps_cleanup; } psinfo.type[ac] = PS_BY_REGEX; flag |= PS_BY_REGEX; ac++; } optind++; continue; } else { psinfo.pid[ac] = NO_PID; psinfo.task[ac] = NO_TASK; p = args[optind][0] == '\\' ? &args[optind][1] : args[optind]; strlcpy(psinfo.comm[ac], p, TASK_COMM_LEN); psinfo.type[ac] = PS_BY_CMD; flag |= PS_BY_CMD; ac++; } optind++; } psinfo.argc = ac; show_ps(flag, &psinfo); bailout: ps_cleanup((void *)&psinfo); } /* * Clean up regex buffers and pattern strings. */ static void ps_cleanup(void *arg) { int i; struct psinfo *ps; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; ps = (struct psinfo *)arg; for (i = 0; i < ps->regexs; i++) { regfree(&ps->regex_data[i].regex); free(ps->regex_data[i].pattern); } if (ps->cpus) FREEBUF(ps->cpus); } /* * Do the work requested by cmd_ps(). 
*/ static void show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi) { struct task_mem_usage task_mem_usage, *tm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; ulong tgid; int task_active; if ((flag & PS_USER) && is_kernel_thread(tc->task)) return; if ((flag & PS_KERNEL) && !is_kernel_thread(tc->task)) return; if (flag & PS_GROUP) { if (flag & (PS_LAST_RUN|PS_MSECS)) error(FATAL, "-G not supported with -%c option\n", flag & PS_LAST_RUN ? 'l' : 'm'); tgid = task_tgid(tc->task); if (tc->pid != tgid) { if (pc->curcmd_flags & TASK_SPECIFIED) { if (!(tc = tgid_to_context(tgid))) return; if (hq_entry_exists((ulong)tc)) return; hq_enter((ulong)tc); } else return; } else { if (hq_entry_exists((ulong)tc)) return; hq_enter((ulong)tc); } } if (flag & PS_PPID_LIST) { parent_list(tc->task); fprintf(fp, "\n"); return; } if (flag & PS_CHILD_LIST) { child_list(tc->task); fprintf(fp, "\n"); return; } if (flag & (PS_LAST_RUN)) { show_last_run(tc, psi); return; } if (flag & (PS_MSECS)) { show_milliseconds(tc, psi); return; } if (flag & PS_ARGV_ENVP) { show_task_args(tc); return; } if (flag & PS_RLIMIT) { show_task_rlimit(tc); return; } if (flag & PS_TGID_LIST) { show_tgid_list(tc->task); return; } tm = &task_mem_usage; get_task_mem_usage(tc->task, tm); task_active = is_task_active(tc->task); if (task_active) { if (hide_offline_cpu(tc->processor)) fprintf(fp, "- "); else fprintf(fp, "> "); } else fprintf(fp, " "); fprintf(fp, "%5ld %5ld %2s %s %3s", tc->pid, task_to_pid(tc->ptask), task_cpu(tc->processor, buf2, !VERBOSE), task_pointer_string(tc, flag & PS_KSTACKP, buf3), task_state_string(tc->task, buf1, !VERBOSE)); pad_line(fp, strlen(buf1) > 3 ? 
1 : 2, ' '); sprintf(buf1, "%.1f", tm->pct_physmem); if (strlen(buf1) == 3) mkstring(buf1, 4, CENTER|RJUST, NULL); fprintf(fp, "%s ", buf1); fprintf(fp, "%7ld ", (tm->total_vm * PAGESIZE())/1024); fprintf(fp, "%6ld ", (tm->rss * PAGESIZE())/1024); if (is_kernel_thread(tc->task)) fprintf(fp, "[%s]\n", tc->comm); else fprintf(fp, "%s\n", tc->comm); } static void show_ps(ulong flag, struct psinfo *psi) { int i, ac; struct task_context *tc; int print; char buf[BUFSIZE]; if (!(flag & (PS_EXCLUSIVE|PS_NO_HEADER))) fprintf(fp, " PID PPID CPU %s ST %%MEM VSZ RSS COMM\n", flag & PS_KSTACKP ? mkstring(buf, VADDR_PRLEN, CENTER|RJUST, "KSTACKP") : mkstring(buf, VADDR_PRLEN, CENTER, "TASK")); if (flag & PS_SHOW_ALL) { if (flag & PS_TIMES) { show_task_times(NULL, flag); return; } if (flag & PS_SUMMARY) { show_ps_summary(flag); return; } if (psi->cpus) { show_ps_data(flag, NULL, psi); return; } tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) show_ps_data(flag, tc, NULL); return; } pc->curcmd_flags |= TASK_SPECIFIED; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { for (ac = 0; ac < psi->argc; ac++) { print = FALSE; switch(psi->type[ac]) { case PS_BY_PID: if (tc->pid == psi->pid[ac]) print = TRUE; break; case PS_BY_TASK: if ((tc->task == psi->task[ac])) print = TRUE; break; case PS_BY_CMD: if (STREQ(tc->comm, psi->comm[ac])) { if (flag & (PS_TGID_LIST|PS_GROUP)) { if (tc->pid == task_tgid(tc->task)) print = TRUE; else print = FALSE; } else print = TRUE; } break; case PS_BY_REGEX: if (regexec(&psi->regex_data[ac].regex, tc->comm, 0, NULL, 0) == 0) { if (flag & (PS_TGID_LIST|PS_GROUP)) { if (tc->pid == task_tgid(tc->task)) print = TRUE; else print = FALSE; } else print = TRUE; } break; } if (print) { if (flag & PS_TIMES) show_task_times(tc, flag); else show_ps_data(flag, tc, NULL); } } } } static void show_ps_summary(ulong flag) { int i, s; struct task_context *tc; char buf[BUFSIZE]; #define MAX_STATES 20 struct ps_state { long cnt; char 
string[3]; } ps_state[MAX_STATES]; if (flag & (PS_USER|PS_KERNEL|PS_GROUP)) error(FATAL, "-S option cannot be used with other options\n"); for (s = 0; s < MAX_STATES; s++) ps_state[s].cnt = 0; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { task_state_string(tc->task, buf, !VERBOSE); for (s = 0; s < MAX_STATES; s++) { if (ps_state[s].cnt && STREQ(ps_state[s].string, buf)) { ps_state[s].cnt++; break; } if (ps_state[s].cnt == 0) { strcpy(ps_state[s].string, buf); ps_state[s].cnt++; break; } } } for (s = 0; s < MAX_STATES; s++) { if (ps_state[s].cnt) fprintf(fp, " %s: %ld\n", ps_state[s].string, ps_state[s].cnt); } } /* * Display the task preceded by the last_run stamp and its * current state. */ static void show_last_run(struct task_context *tc, struct psinfo *psi) { int i, c, others; struct task_context *tcp; char format[15]; char buf[BUFSIZE]; tcp = FIRST_CONTEXT(); sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx", task_last_run(tcp->task)); c = strlen(buf); sprintf(format, "[%c%dll%c] ", '%', c, pc->output_radix == 10 ? 'u' : 'x'); if (psi) { for (c = others = 0; c < kt->cpus; c++) { if (!NUM_IN_BITMAP(psi->cpus, c)) continue; fprintf(fp, "%sCPU: %d", others++ ? 
"\n" : "", c); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n"); continue; } else fprintf(fp, "\n"); tcp = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tcp++) { if (tcp->processor != c) continue; fprintf(fp, format, task_last_run(tcp->task)); fprintf(fp, "[%s] ", task_state_string(tcp->task, buf, !VERBOSE)); print_task_header(fp, tcp, FALSE); } } } else if (tc) { fprintf(fp, format, task_last_run(tc->task)); fprintf(fp, "[%s] ", task_state_string(tc->task, buf, !VERBOSE)); print_task_header(fp, tc, FALSE); } else { tcp = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tcp++) { fprintf(fp, format, task_last_run(tcp->task)); fprintf(fp, "[%s] ", task_state_string(tcp->task, buf, !VERBOSE)); print_task_header(fp, tcp, FALSE); } } } /* * Translate a value in nanoseconds into a string showing days, * hours, minutes, seconds and milliseconds. */ static char * translate_nanoseconds(ulonglong value, char *buf) { ulong days, hours, mins, secs, ms; value = value / 1000000L; ms = value % 1000L; value = value / 1000L; secs = value % 60L; value = value / 60L; mins = value % 60L; value = value / 60L; hours = value % 24L; value = value / 24L; days = value; sprintf(buf, "%ld %02ld:%02ld:%02ld.%03ld", days, hours, mins, secs, ms); return buf; } /* * Display the task preceded by a per-rq translation of the * sched_info.last_arrival and its current state. */ static void show_milliseconds(struct task_context *tc, struct psinfo *psi) { int i, c, others, days, max_days; struct task_context *tcp; char format[15]; char buf[BUFSIZE]; struct syment *rq_sp; ulong runq; ulonglong rq_clock; long long delta; if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues"))) error(FATAL, "cannot determine per-cpu runqueue address\n"); tcp = FIRST_CONTEXT(); sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx", task_last_run(tcp->task)); c = strlen(buf); sprintf(format, "[%c%dll%c] ", '%', c, pc->output_radix == 10 ? 
'u' : 'x'); if (psi) { for (c = others = 0; c < kt->cpus; c++) { if (!NUM_IN_BITMAP(psi->cpus, c)) continue; fprintf(fp, "%sCPU: %d", others++ ? "\n" : "", c); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n"); continue; } else fprintf(fp, "\n"); if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[c]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_clock, sizeof(ulonglong), "per-cpu rq clock", FAULT_ON_ERROR); translate_nanoseconds(rq_clock, buf); max_days = first_space(buf) - buf; tcp = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tcp++) { if (tcp->processor != c) continue; delta = rq_clock - task_last_run(tcp->task); if (delta < 0) delta = 0; translate_nanoseconds(delta, buf); days = first_space(buf) - buf; fprintf(fp, "[%s%s] ", space(max_days - days), buf); fprintf(fp, "[%s] ", task_state_string(tcp->task, buf, !VERBOSE)); print_task_header(fp, tcp, FALSE); } } } else if (tc) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[tc->processor]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_clock, sizeof(ulonglong), "per-cpu rq clock", FAULT_ON_ERROR); translate_nanoseconds(rq_clock, buf); max_days = first_space(buf) - buf; delta = rq_clock - task_last_run(tc->task); if (delta < 0) delta = 0; translate_nanoseconds(delta, buf); days = first_space(buf) - buf; fprintf(fp, "[%s%s] ", space(max_days - days), buf); fprintf(fp, "[%s] ", task_state_string(tc->task, buf, !VERBOSE)); print_task_header(fp, tc, FALSE); } else { tcp = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tcp++) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[tcp->processor]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_clock, sizeof(ulonglong), "per-cpu rq clock", FAULT_ON_ERROR); delta = rq_clock - task_last_run(tcp->task); if (delta < 0) delta = 0; 
fprintf(fp, "[%s] ", translate_nanoseconds(delta, buf)); fprintf(fp, "[%s] ", task_state_string(tcp->task, buf, !VERBOSE)); print_task_header(fp, tcp, FALSE); } } } /* * Show the argv and envp strings pointed to by mm_struct->arg_start * and mm_struct->env_start. The user addresses need to broken up * into physical on a page-per-page basis because we typically are * not going to be working in the context of the target task. */ static void show_task_args(struct task_context *tc) { ulong arg_start, arg_end, env_start, env_end; char *buf, *bufptr, *p1; char *as, *ae, *es, *ee; physaddr_t paddr; ulong uvaddr, size, cnt; int c, d; print_task_header(fp, tc, 0); if (!tc || !tc->mm_struct) { /* probably a kernel thread */ error(INFO, "no user stack\n\n"); return; } if (!task_mm(tc->task, TRUE)) return; if (INVALID_MEMBER(mm_struct_arg_start)) { MEMBER_OFFSET_INIT(mm_struct_arg_start, "mm_struct", "arg_start"); MEMBER_OFFSET_INIT(mm_struct_arg_end, "mm_struct", "arg_end"); MEMBER_OFFSET_INIT(mm_struct_env_start, "mm_struct", "env_start"); MEMBER_OFFSET_INIT(mm_struct_env_end, "mm_struct", "env_end"); } arg_start = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_start)); arg_end = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_end)); env_start = ULONG(tt->mm_struct + OFFSET(mm_struct_env_start)); env_end = ULONG(tt->mm_struct + OFFSET(mm_struct_env_end)); if (CRASHDEBUG(1)) { fprintf(fp, "arg_start: %lx arg_end: %lx (%ld)\n", arg_start, arg_end, arg_end - arg_start); fprintf(fp, "env_start: %lx env_end: %lx (%ld)\n", env_start, env_end, env_end - env_start); } buf = GETBUF(env_end - arg_start + 1); uvaddr = arg_start; size = env_end - arg_start; bufptr = buf; while (size > 0) { if (!uvtop(tc, uvaddr, &paddr, 0)) { error(INFO, "cannot access user stack address: %lx\n\n", uvaddr); goto bailout; } cnt = PAGESIZE() - PAGEOFFSET(uvaddr); if (cnt > size) cnt = size; if (!readmem(paddr, PHYSADDR, bufptr, cnt, "user stack contents", RETURN_ON_ERROR|QUIET)) { error(INFO, "cannot access user 
stack address: %lx\n\n", uvaddr); goto bailout; } uvaddr += cnt; bufptr += cnt; size -= cnt; } as = buf; ae = &buf[arg_end - arg_start]; es = &buf[env_start - arg_start]; ee = &buf[env_end - arg_start]; fprintf(fp, "ARG: "); for (p1 = as, c = 0; p1 < ae; p1++) { if (*p1 == NULLCHAR) { if (c) fprintf(fp, " "); c = 0; } else { fprintf(fp, "%c", *p1); c++; } } fprintf(fp, "\nENV: "); for (p1 = es, c = d = 0; p1 < ee; p1++) { if (*p1 == NULLCHAR) { if (c) fprintf(fp, "\n"); c = 0; } else { fprintf(fp, "%s%c", !c && (p1 != es) ? " " : "", *p1); c++, d++; } } fprintf(fp, "\n%s", d ? "" : "\n"); bailout: FREEBUF(buf); } char *rlim_names[] = { /* 0 */ "CPU", /* 1 */ "FSIZE", /* 2 */ "DATA", /* 3 */ "STACK", /* 4 */ "CORE", /* 5 */ "RSS", /* 6 */ "NPROC", /* 7 */ "NOFILE", /* 8 */ "MEMLOCK", /* 9 */ "AS", /* 10 */ "LOCKS", /* 11 */ "SIGPENDING", /* 12 */ "MSGQUEUE", /* 13 */ "NICE", /* 14 */ "RTPRIO", /* 15 */ "RTTIME", NULL, }; #ifndef RLIM_INFINITY #define RLIM_INFINITY (~0UL) #endif /* * Show the current and maximum rlimit values. 
*/ static void show_task_rlimit(struct task_context *tc) { int i, j, len1, len2, rlimit_index; int in_task_struct, in_signal_struct; char *rlimit_buffer; ulong *p1, rlim_addr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; rlimit_index = 0; if (!VALID_MEMBER(task_struct_rlim) && !VALID_MEMBER(signal_struct_rlim)) { MEMBER_OFFSET_INIT(task_struct_rlim, "task_struct", "rlim"); MEMBER_OFFSET_INIT(signal_struct_rlim, "signal_struct", "rlim"); STRUCT_SIZE_INIT(rlimit, "rlimit"); if (!VALID_MEMBER(task_struct_rlim) && !VALID_MEMBER(signal_struct_rlim)) error(FATAL, "cannot determine rlimit array location\n"); } else if (!VALID_STRUCT(rlimit)) error(FATAL, "cannot determine rlimit structure definition\n"); in_task_struct = in_signal_struct = FALSE; if (VALID_MEMBER(task_struct_rlim)) { rlimit_index = get_array_length("task_struct.rlim", NULL, 0); in_task_struct = TRUE; } else if (VALID_MEMBER(signal_struct_rlim)) { if (!VALID_MEMBER(task_struct_signal)) error(FATAL, "cannot determine rlimit array location\n"); rlimit_index = get_array_length("signal_struct.rlim", NULL, 0); in_signal_struct = TRUE; } if (!rlimit_index) error(FATAL, "cannot determine rlimit array size\n"); for (i = len1 = 0; i < rlimit_index; i++) { if (rlim_names[i] == NULL) continue; if ((j = strlen(rlim_names[i])) > len1) len1 = j; } len2 = strlen("(unlimited)"); rlimit_buffer = GETBUF(rlimit_index * SIZE(rlimit)); print_task_header(fp, tc, 0); fill_task_struct(tc->task); if (in_task_struct) { BCOPY(tt->task_struct + OFFSET(task_struct_rlim), rlimit_buffer, rlimit_index * SIZE(rlimit)); } else if (in_signal_struct) { rlim_addr = ULONG(tt->task_struct + OFFSET(task_struct_signal)); if (!readmem(rlim_addr + OFFSET(signal_struct_rlim), KVADDR, rlimit_buffer, rlimit_index * SIZE(rlimit), "signal_struct rlimit array", RETURN_ON_ERROR)) { FREEBUF(rlimit_buffer); return; } } fprintf(fp, " %s %s %s\n", mkstring(buf1, len1, RJUST, "RLIMIT"), mkstring(buf2, len2, CENTER|RJUST, "CURRENT"), 
mkstring(buf3, len2, CENTER|RJUST, "MAXIMUM")); for (p1 = (ulong *)rlimit_buffer, i = 0; i < rlimit_index; i++) { fprintf(fp, " %s ", mkstring(buf1, len1, RJUST, rlim_names[i] ? rlim_names[i] : "(unknown)")); if (*p1 == (ulong)RLIM_INFINITY) fprintf(fp, "(unlimited) "); else fprintf(fp, "%s ", mkstring(buf1, len2, CENTER|LJUST|LONG_DEC, MKSTR(*p1))); p1++; if (*p1 == (ulong)RLIM_INFINITY) fprintf(fp, "(unlimited)\n"); else fprintf(fp, "%s\n", mkstring(buf1, len2, CENTER|LJUST|LONG_DEC, MKSTR(*p1))); p1++; } fprintf(fp, "\n"); FREEBUF(rlimit_buffer); } /* * Put either the task_struct address or kernel stack pointer into a string. * If the kernel stack pointer is requested, piggy-back on top of the * back trace code to avoid having to deal with machine dependencies, * live active tasks, and dumpfile panic tasks. */ static char * task_pointer_string(struct task_context *tc, ulong do_kstackp, char *buf) { struct bt_info bt_info, *bt; char buf1[BUFSIZE]; if (do_kstackp) { bt = &bt_info; BZERO(bt, sizeof(struct bt_info));; if (is_task_active(tc->task)) { bt->stkptr = 0; } else if (VALID_MEMBER(task_struct_thread_esp)) { readmem(tc->task + OFFSET(task_struct_thread_esp), KVADDR, &bt->stkptr, sizeof(void *), "thread_struct esp", FAULT_ON_ERROR); } else if (VALID_MEMBER(task_struct_thread_ksp)) { readmem(tc->task + OFFSET(task_struct_thread_ksp), KVADDR, &bt->stkptr, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } else { bt->task = tc->task; bt->tc = tc; bt->stackbase = GET_STACKBASE(tc->task); bt->stacktop = GET_STACKTOP(tc->task); bt->flags |= BT_KSTACKP; back_trace(bt); } if (bt->stkptr) sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(bt->stkptr))); else sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "--")); } else sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(tc->task))); return buf; } /* * Dump the task list ordered by start_time. 
*/ struct kernel_timeval { unsigned int tv_sec; unsigned int tv_usec; }; struct task_start_time { struct task_context *tc; ulonglong start_time; ulong tms_utime; ulong tms_stime; struct timeval old_utime; struct timeval old_stime; struct kernel_timeval kutime; struct kernel_timeval kstime; ulonglong utime; ulonglong stime; }; static void show_task_times(struct task_context *tcp, ulong flags) { int i, tasks, use_kernel_timeval, use_utime_stime; struct task_context *tc; struct task_start_time *task_start_times, *tsp; ulong jiffies, tgid; ulonglong jiffies_64; char buf1[BUFSIZE]; task_start_times = (struct task_start_time *) GETBUF(RUNNING_TASKS() * sizeof(struct task_start_time)); use_kernel_timeval = STRUCT_EXISTS("kernel_timeval"); if (VALID_MEMBER(task_struct_utime) && (SIZE(task_struct_utime) == (BITS32() ? sizeof(uint32_t) : sizeof(uint64_t)))) use_utime_stime = TRUE; else use_utime_stime = FALSE; get_symbol_data("jiffies", sizeof(long), &jiffies); if (symbol_exists("jiffies_64")) get_uptime(NULL, &jiffies_64); tsp = task_start_times; tc = tcp ? 
tcp : FIRST_CONTEXT(); for (i = tasks = 0; i < RUNNING_TASKS(); i++, tc++) { if ((flags & PS_USER) && is_kernel_thread(tc->task)) continue; if ((flags & PS_KERNEL) && !is_kernel_thread(tc->task)) continue; if (flags & PS_GROUP) { tgid = task_tgid(tc->task); if (tc->pid != tgid) { if (tcp) { if (!(tc = tgid_to_context(tgid))) return; } else continue; } if (hq_entry_exists((ulong)tc)) return; hq_enter((ulong)tc); } fill_task_struct(tc->task); if (!tt->last_task_read) { if (tcp) return; continue; } tsp->tc = tc; if (BITS32() && (SIZE(task_struct_start_time) == 8)) { if (start_time_timespec()) tsp->start_time = ULONG(tt->task_struct + OFFSET(task_struct_start_time)); else tsp->start_time = ULONGLONG(tt->task_struct + OFFSET(task_struct_start_time)); } else { start_time_timespec(); tsp->start_time = ULONG(tt->task_struct + OFFSET(task_struct_start_time)); } if (VALID_MEMBER(task_struct_times)) { tsp->tms_utime = ULONG(tt->task_struct + OFFSET(task_struct_times) + OFFSET(tms_tms_utime)); tsp->tms_stime = ULONG(tt->task_struct + OFFSET(task_struct_times) + OFFSET(tms_tms_stime)); } else if (VALID_MEMBER(task_struct_utime)) { if (use_utime_stime) { tsp->utime = ULONG(tt->task_struct + OFFSET(task_struct_utime)); tsp->stime = ULONG(tt->task_struct + OFFSET(task_struct_stime)); } else if (use_kernel_timeval) { BCOPY(tt->task_struct + OFFSET(task_struct_utime), &tsp->kutime, sizeof(struct kernel_timeval)); BCOPY(tt->task_struct + OFFSET(task_struct_stime), &tsp->kstime, sizeof(struct kernel_timeval)); } else if (VALID_STRUCT(cputime_t)) { /* since linux 2.6.11 */ if (SIZE(cputime_t) == 8) { uint64_t utime_64, stime_64; BCOPY(tt->task_struct + OFFSET(task_struct_utime), &utime_64, 8); BCOPY(tt->task_struct + OFFSET(task_struct_stime), &stime_64, 8); /* convert from micro-sec. to sec. 
*/ tsp->old_utime.tv_sec = utime_64 / 1000000; tsp->old_stime.tv_sec = stime_64 / 1000000; } else { uint32_t utime_32, stime_32; BCOPY(tt->task_struct + OFFSET(task_struct_utime), &utime_32, 4); BCOPY(tt->task_struct + OFFSET(task_struct_stime), &stime_32, 4); tsp->old_utime.tv_sec = utime_32; tsp->old_stime.tv_sec = stime_32; } } else { BCOPY(tt->task_struct + OFFSET(task_struct_utime), &tsp->utime, sizeof(struct timeval)); BCOPY(tt->task_struct + OFFSET(task_struct_stime), &tsp->stime, sizeof(struct timeval)); } } tasks++; tsp++; if (tcp) break; } qsort((void *)task_start_times, (size_t)tasks, sizeof(struct task_start_time), compare_start_time); for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) { print_task_header(fp, tsp->tc, 0); fprintf(fp, " RUN TIME: %s\n", symbol_exists("jiffies_64") ? convert_time(convert_start_time(tsp->start_time, jiffies_64), buf1) : convert_time(jiffies - tsp->start_time, buf1)); fprintf(fp, " START TIME: %llu\n", tsp->start_time); if (VALID_MEMBER(task_struct_times)) { fprintf(fp, " USER TIME: %ld\n", tsp->tms_utime); fprintf(fp, " SYSTEM TIME: %ld\n\n", tsp->tms_stime); } else if (VALID_MEMBER(task_struct_utime)) { if (use_utime_stime) { fprintf(fp, " UTIME: %lld\n", (ulonglong)tsp->utime); fprintf(fp, " STIME: %lld\n\n", (ulonglong)tsp->stime); } else if (use_kernel_timeval) { fprintf(fp, " USER TIME: %d\n", tsp->kutime.tv_sec); fprintf(fp, " SYSTEM TIME: %d\n\n", tsp->kstime.tv_sec); } else { fprintf(fp, " USER TIME: %ld\n", tsp->old_utime.tv_sec); fprintf(fp, " SYSTEM TIME: %ld\n\n", tsp->old_stime.tv_sec); } } } FREEBUF(task_start_times); } static int start_time_timespec(void) { char buf[BUFSIZE]; switch(tt->flags & (TIMESPEC | NO_TIMESPEC)) { case TIMESPEC: return TRUE; case NO_TIMESPEC: return FALSE; default: break; } tt->flags |= NO_TIMESPEC; open_tmpfile(); sprintf(buf, "ptype struct task_struct"); if (!gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR)) { close_tmpfile(); return FALSE; } rewind(pc->tmpfile); while 
(fgets(buf, BUFSIZE, pc->tmpfile)) {
	/* Scan gdb "ptype struct task_struct" output for the start_time
	 * member; if it is declared as a struct timespec, flip the
	 * task-table flags from NO_TIMESPEC to TIMESPEC. */
	if (strstr(buf, "start_time;")) {
		if (strstr(buf, "struct timespec")) {
			tt->flags &= ~NO_TIMESPEC;
			tt->flags |= TIMESPEC;
		}
	}
}
close_tmpfile();

return (tt->flags & TIMESPEC ? TRUE : FALSE);
}

/*
 *  Convert a task_struct.start_time value into elapsed jiffies relative
 *  to "current" (jiffies_64), normalizing for the kernel's start_time
 *  representation as cached in tt->flags by start_time_timespec().
 */
static ulonglong
convert_start_time(ulonglong start_time, ulonglong current)
{
	ulong tmp1, tmp2;
	ulonglong wrapped;

	switch(tt->flags & (TIMESPEC | NO_TIMESPEC))
	{
	case TIMESPEC:
		/* start_time is in seconds: scale to jiffies and clamp the
		 * result at zero if it would exceed "current". */
		if ((start_time * (ulonglong)machdep->hz) > current)
			return 0;
		else
			return current - (start_time * (ulonglong)machdep->hz);

	case NO_TIMESPEC:
		if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
			/* NOTE(review): the +/- 300*HZ adjustments below
			 * appear to compensate for the 2.6 kernel's
			 * INITIAL_JIFFIES bias (jiffies starts 5 minutes
			 * before the 32-bit wrap point) -- confirm against
			 * the target kernel's INITIAL_JIFFIES definition. */
			wrapped = (start_time & 0xffffffff00000000ULL);
			if (wrapped) {
				/* Upper half set: jiffies have wrapped. */
				wrapped -= 0x100000000ULL;
				start_time &= 0x00000000ffffffffULL;
				start_time |= wrapped;
				start_time += (ulonglong)(300*machdep->hz);
			} else {
				/* Not yet wrapped: undo the negative
				 * initial-jiffies offset in 32-bit space. */
				tmp1 = (ulong)(uint)(-300*machdep->hz);
				tmp2 = (ulong)start_time;
				start_time = (ulonglong)(tmp2 - tmp1);
			}
		}
		break;

	default:
		break;
	}

	return start_time;
}

/*
 *  The comparison function must return an integer less than,
 *  equal to, or greater than zero if the first argument is
 *  considered to be respectively less than, equal to, or
 *  greater than the second.  If two members compare as equal,
 *  their order in the sorted array is undefined.
 */
static int
compare_start_time(const void *v1, const void *v2)
{
	struct task_start_time *t1, *t2;

	t1 = (struct task_start_time *)v1;
	t2 = (struct task_start_time *)v2;

	return (t1->start_time < t2->start_time ? -1 :
		t1->start_time == t2->start_time ? 0 : 1);
}

/*
 *  Read and return a task's parent task pointer, handling both the
 *  modern "parent" and older "p_pptr" task_struct member names.
 */
static ulong
parent_of(ulong task)
{
	long offset;
	ulong parent;

	if (VALID_MEMBER(task_struct_parent))
		offset = OFFSET(task_struct_parent);
	else
		offset = OFFSET(task_struct_p_pptr);

	readmem(task+offset, KVADDR, &parent,
	    sizeof(void *), "task parent", FAULT_ON_ERROR);

	return parent;
}

/*
 *  Dump the parental hierarchy of a task.
*/ static void parent_list(ulong task) { int i, j, cnt; struct task_context *tc; char *buffer; long reserved; ulong *task_list, child, parent; reserved = 100 * sizeof(ulong); buffer = GETBUF(reserved); task_list = (ulong *)buffer; child = task_list[0] = task; parent = parent_of(child); cnt = 1; while (child != parent) { child = task_list[cnt++] = parent; parent = parent_of(child); if (cnt == reserved) { RESIZEBUF(buffer, reserved, reserved * 2); reserved *= 2; task_list = (ulong *)buffer; } } for (i = cnt-1, j = 0; i >= 0; i--, j++) { INDENT(j); tc = task_to_context(task_list[i]); if (tc) print_task_header(fp, tc, 0); } FREEBUF(task_list); } /* * Dump the children of a task. */ static void child_list(ulong task) { int i; int cnt; struct task_context *tc; tc = task_to_context(task); print_task_header(fp, tc, 0); tc = FIRST_CONTEXT(); for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->ptask == task) { INDENT(2); print_task_header(fp, tc, 0); cnt++; } } if (!cnt) fprintf(fp, " (no children)\n"); } /* * Dump the children of a task. */ static void show_tgid_list(ulong task) { int i; int cnt; struct task_context *tc; ulong tgid; tc = task_to_context(task); tgid = task_tgid(task); if (tc->pid != tgid) { if (pc->curcmd_flags & TASK_SPECIFIED) { if (!(tc = tgid_to_context(tgid))) return; task = tc->task; } else return; } if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN)) return; print_task_header(fp, tc, 0); tc = FIRST_CONTEXT(); for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->task == task) continue; if (task_tgid(tc->task) == tgid) { INDENT(2); print_task_header(fp, tc, 0); cnt++; if (tc->pid == 0) pc->curcmd_flags |= IDLE_TASK_SHOWN; } } if (!cnt) fprintf(fp, " (no threads)\n"); fprintf(fp, "\n"); } /* * Return the first task found that belongs to a pid. 
*/ ulong pid_to_task(ulong pid) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->pid == pid) return(tc->task); return((ulong)NULL); } /* * Return the pid of a task. */ ulong task_to_pid(ulong task) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->task == task) return(tc->pid); return(NO_PID); } /* * Verify whether a task exists. */ int task_exists(ulong task) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->task == task) return TRUE; return FALSE; } /* * Return the task_context structure of a task. */ struct task_context * task_to_context(ulong task) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->task == task) return tc; return NULL; } /* * Return a tgid's parent task_context structure. */ struct task_context * tgid_to_context(ulong parent_tgid) { int i; struct task_context *tc; ulong tgid; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { tgid = task_tgid(tc->task); if ((tgid == parent_tgid) && (tgid == tc->pid)) return tc; } return NULL; } /* * Return the task_context structure of the first task found with a pid, * while linking all tasks that have that pid. */ struct task_context * pid_to_context(ulong pid) { int i; struct task_context *tc, *firsttc, *lasttc; tc = FIRST_CONTEXT(); firsttc = lasttc = NULL; for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->pid == pid) { if (!firsttc) firsttc = tc; if (lasttc) lasttc->tc_next = tc; tc->tc_next = NULL; lasttc = tc; } } return firsttc; } /* * Verify whether a pid exists, and if found, linking all tasks having the pid. 
int pid_exists(ulong pid)
{
	int i;
	struct task_context *tc, *lasttc;
	int count;

	tc = FIRST_CONTEXT();
	count = 0;
	lasttc = NULL;

	/* Count matching contexts, and as a side effect chain them
	 * together via their tc_next links (same linkage that
	 * pid_to_context() builds). */
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->pid == pid) {
			count++;
			if (lasttc)
				lasttc->tc_next = tc;
			tc->tc_next = NULL;
			lasttc = tc;
		}
	}

	return(count);
}

/*
 *  Translate a stack pointer to a task, dealing with possible split.
 *  If that doesn't work, check the hardirq_stack and softirq_stack.
 */
ulong
stkptr_to_task(ulong sp)
{
	int i, c;
	struct task_context *tc;
	struct bt_info bt_info, *bt;

	/* Only stackbase/stacktop of the bt_info are initialized here;
	 * presumably INSTACK() consults only those fields -- the rest of
	 * the structure is left unset (NOTE(review): confirm). */
	bt = &bt_info;
	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		bt->stackbase = GET_STACKBASE(tc->task);
		bt->stacktop = GET_STACKTOP(tc->task);
		if (INSTACK(sp, bt))
			return tc->task;
	}

	if (!(tt->flags & IRQSTACKS))
		return NO_TASK;

	/* Not in any process stack: check the per-cpu hard/soft IRQ
	 * stacks, requiring that the task recorded as running on that
	 * IRQ stack matches the candidate context. */
	bt = &bt_info;
	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		for (c = 0; c < NR_CPUS; c++) {
			if (tt->hardirq_ctx[c]) {
				bt->stackbase = tt->hardirq_ctx[c];
				bt->stacktop = bt->stackbase + SIZE(irq_ctx);
				if (INSTACK(sp, bt) && (tt->hardirq_tasks[c] == tc->task))
					return tc->task;
			}
			if (tt->softirq_ctx[c]) {
				bt->stackbase = tt->softirq_ctx[c];
				bt->stacktop = bt->stackbase + SIZE(irq_ctx);
				if (INSTACK(sp, bt) && (tt->softirq_tasks[c] == tc->task))
					return tc->task;
			}
		}
	}

	return NO_TASK;
}

/*
 *  Translate a task pointer to its thread_info address; fatal if the
 *  kernel has no thread_info struct or the task is unknown.
 */
ulong
task_to_thread_info(ulong task)
{
	int i;
	struct task_context *tc;

	if (!(tt->flags & THREAD_INFO))
		error(FATAL,
		   "task_to_thread_info: thread_info struct does not exist!\n");

	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->task == task)
			return tc->thread_info;
	}

	return(error(FATAL, "task does not exist: %lx\n", task));
}

/*
 *  Translate a task address to its stack base, dealing with potential split.
ulong task_to_stackbase(ulong task)
{
	/* With THREAD_INFO kernels the stack base is the thread_info
	 * address; otherwise the task_struct sits at the base of its
	 * (power-of-two sized) stack, so mask the low bits. */
	if (tt->flags & THREAD_INFO)
		return task_to_thread_info(task);
	else
		return (task & ~(STACKSIZE()-1));
}

/*
 *  Try to translate a decimal or hexadecimal string into a task or pid,
 *  failing if no task or pid exists, or if there is ambiguity between
 *  the decimal and hexadecimal translations.  However, if the value could
 *  be a decimal PID and a hexadecimal PID of two different processes, then
 *  default to the decimal value.
 *
 *  This was added in preparation for overlapping, zero-based, user and kernel
 *  virtual addresses on s390 and s390x, allowing for the entry of ambiguous
 *  decimal/hexadecimal task address values without the leading "0x".
 *  It should be used in lieu of "stol" when parsing for task/pid arguments.
 */
int
str_to_context(char *string, ulong *value, struct task_context **tcp)
{
	ulong dvalue, hvalue;
	int found, type;
	char *s;
	struct task_context *tc_dp, *tc_dt, *tc_hp, *tc_ht;

	if (string == NULL) {
		error(INFO, "received NULL string\n");
		return STR_INVALID;
	}

	s = string;
	dvalue = hvalue = BADADDR;

	/* Parse the string both as decimal and as hex, independently. */
	if (decimal(s, 0))
		dvalue = dtol(s, RETURN_ON_ERROR, NULL);

	if (hexadecimal(s, 0)) {
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) <= MAX_HEXADDR_STRLEN)
			hvalue = htol(s, RETURN_ON_ERROR, NULL);
	}

	found = 0;
	tc_dp = tc_dt = tc_hp = tc_ht = NULL;
	type = STR_INVALID;

	/* Look up each successful parse as both a pid and a task
	 * address, counting how many interpretations match. */
	if (dvalue != BADADDR) {
		if ((tc_dp = pid_to_context(dvalue)))
			found++;
		if ((tc_dt = task_to_context(dvalue)))
			found++;
	}

	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
		if ((tc_hp = pid_to_context(hvalue)))
			found++;
		if ((tc_ht = task_to_context(hvalue)))
			found++;
	}

	switch (found)
	{
	case 2:
		/* Two matches are only resolvable when both are PIDs
		 * (decimal wins, per the function comment); any other
		 * double match remains ambiguous and returns STR_INVALID. */
		if (tc_dp && tc_hp) {
			*tcp = tc_dp;
			*value = dvalue;
			type = STR_PID;
		}
		break;

	case 1:
		/* Exactly one interpretation matched: use it. */
		if (tc_dp) {
			*tcp = tc_dp;
			*value = dvalue;
			type = STR_PID;
		}

		if (tc_dt) {
			*tcp = tc_dt;
			*value = dvalue;
			type = STR_TASK;
		}

		if (tc_hp) {
			*tcp = tc_hp;
			*value = hvalue;
			type = STR_PID;
		}

		if (tc_ht) {
			*tcp = tc_ht;
			*value = hvalue;
			type = STR_TASK;
		}
		break;
	}

	return type;
}
/* * Return the task if the vaddr is part of a task's task_struct. */ ulong vaddr_in_task_struct(ulong vaddr) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if ((vaddr >= tc->task) && (vaddr < (tc->task + SIZE(task_struct)))) return tc->task; } return NO_TASK; } /* * Verify whether any task is running a command. */ int comm_exists(char *s) { int i, cnt; struct task_context *tc; char buf[TASK_COMM_LEN]; strlcpy(buf, s, TASK_COMM_LEN); tc = FIRST_CONTEXT(); for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) if (STREQ(tc->comm, buf)) cnt++; return cnt; } /* * Set a new context. If only a pid is passed, the first task found with * that pid is selected. */ int set_context(ulong task, ulong pid) { int i; struct task_context *tc; int found; tc = FIRST_CONTEXT(); for (i = 0, found = FALSE; i < RUNNING_TASKS(); i++, tc++) { if (task && (tc->task == task)) { found = TRUE; break; } else if (pid == tc->pid) { found = TRUE; break; } } if (found) { CURRENT_CONTEXT() = tc; return TRUE; } else { if (task) error(INFO, "cannot set context for task: %lx\n", task); else error(INFO, "cannot set context for pid: %d\n", pid); return FALSE; } } /* * Check whether the panic was determined to be caused by a "sys -panic" * command. If so, fix the task_context's pid despite what the task_struct * says. */ #define CONTEXT_ADJUSTED (1) #define CONTEXT_ERRONEOUS (2) static int panic_context_adjusted(struct task_context *tc) { pid_t pgrp, tgid; char buf[BUFSIZE]; if (!(DUMPFILE() && (tc == task_to_context(tt->panic_task)) && (tc->pid == 0) && STRNEQ(tc->comm, pc->program_name) && strstr(get_panicmsg(buf), "Attempted to kill the idle task"))) return 0; if (INVALID_MEMBER(task_struct_pgrp) || INVALID_MEMBER(task_struct_tgid)) return CONTEXT_ERRONEOUS; fill_task_struct(tc->task); pgrp = tt->last_task_read ? UINT(tt->task_struct + OFFSET(task_struct_pgrp)) : 0; tgid = tt->last_task_read ? 
UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0; if (pgrp && tgid && (pgrp == tgid) && !pid_exists((ulong)pgrp)) { tc->pid = (ulong)pgrp; return CONTEXT_ADJUSTED; } return CONTEXT_ERRONEOUS; } /* * Display a task context. */ void show_context(struct task_context *tc) { char buf[BUFSIZE]; char *p1; int adjusted, cnt, indent; adjusted = pc->flags & RUNTIME ? 0 : panic_context_adjusted(tc); indent = pc->flags & RUNTIME ? 0 : 5; INDENT(indent); fprintf(fp, " PID: %ld\n", tc->pid); INDENT(indent); fprintf(fp, "COMMAND: \"%s\"\n", tc->comm); INDENT(indent); fprintf(fp, " TASK: %lx ", tc->task); if ((machdep->flags & (INIT|MCA)) && (tc->pid == 0)) cnt = comm_exists(tc->comm); else cnt = TASKS_PER_PID(tc->pid); if (cnt > 1) fprintf(fp, "(1 of %d) ", cnt); if (tt->flags & THREAD_INFO) fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info); fprintf(fp, "\n"); INDENT(indent); fprintf(fp, " CPU: %s\n", task_cpu(tc->processor, buf, VERBOSE)); INDENT(indent); fprintf(fp, " STATE: %s ", task_state_string(tc->task, buf, VERBOSE)); if (is_task_active(tc->task)) { if (machdep->flags & HWRESET) fprintf(fp, "(HARDWARE RESET)"); else if ((pc->flags & SYSRQ) && (tc->task == tt->panic_task)) fprintf(fp, "(SYSRQ)"); else if (machdep->flags & INIT) fprintf(fp, "(INIT)"); else if ((machdep->flags & MCA) && (tc->task == tt->panic_task)) fprintf(fp, "(MCA)"); else if ((tc->processor >= 0) && (tc->processor < NR_CPUS) && (kt->cpu_flags[tc->processor] & NMI)) fprintf(fp, "(NMI)"); else if ((tc->task == tt->panic_task) && XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) fprintf(fp, "(SUSPEND)"); else if ((tc->task == tt->panic_task) && !(pc->flags2 & SNAP)) fprintf(fp, "(PANIC)"); else fprintf(fp, "(ACTIVE)"); } if (!(pc->flags & RUNTIME) && !ACTIVE() && (tt->flags & PANIC_TASK_NOT_FOUND) && !SYSRQ_TASK(tc->task)) { fprintf(fp, "\n"); INDENT(indent); if (machine_type("S390") || machine_type("S390X")) fprintf(fp, " INFO: no panic task found"); else if (tt->panic_processor >= 0) fprintf(fp, 
"WARNING: reported panic task %lx not found", tt->panic_threads[tt->panic_processor]); else fprintf(fp, "WARNING: panic task not found"); } fprintf(fp, "\n"); if (pc->flags & RUNTIME) return; /* * Dump any pre-first-prompt messages here. */ cnt = 0; if (pc->flags & NAMELIST_UNLINKED) { strcpy(buf, pc->namelist); if ((p1 = strstr(buf, "@"))) *p1 = NULLCHAR; fprintf(fp, "%sNOTE: To save the remote \"%s\" locally,\n enter: \"save kernel\"\n", cnt++ ? "" : "\n", buf); } if (REMOTE_DUMPFILE()) fprintf(fp, "%sNOTE: To save the remote \"%s\" locally,\n enter: \"save dumpfile\"\n", cnt++ ? "" : "\n", basename(pc->server_memsrc)); /* * If this panic was caused by a "sys -panic" command, issue the * proper warning message. */ switch (adjusted) { case CONTEXT_ADJUSTED: fprintf(fp, "%sNOTE: The \"%s\" task_struct will erroneously show a p_pid of 0\n", cnt++ ? "" : "\n", tc->comm); break; case CONTEXT_ERRONEOUS: fprintf(fp, "%sWARNING: The \"%s\" context will erroneously show a PID of 0\n", cnt++ ? "" : "\n", tc->comm); break; } if (!(pc->flags & RUNTIME) && (tt->flags & ACTIVE_ONLY)) error(WARNING, "\nonly the active tasks on each cpu are being tracked\n"); } /* * Translate a task_struct state value into a long (verbose), or short string, * or if requested, just pass back the state value. 
*/ #define TASK_STATE_UNINITIALIZED (-1) static long _RUNNING_ = TASK_STATE_UNINITIALIZED; static long _INTERRUPTIBLE_ = TASK_STATE_UNINITIALIZED; static long _UNINTERRUPTIBLE_ = TASK_STATE_UNINITIALIZED; static long _STOPPED_ = TASK_STATE_UNINITIALIZED; static long _TRACING_STOPPED_ = TASK_STATE_UNINITIALIZED; long _ZOMBIE_ = TASK_STATE_UNINITIALIZED; /* also used by IS_ZOMBIE() */ static long _DEAD_ = TASK_STATE_UNINITIALIZED; static long _SWAPPING_ = TASK_STATE_UNINITIALIZED; static long _EXCLUSIVE_ = TASK_STATE_UNINITIALIZED; static long _WAKEKILL_ = TASK_STATE_UNINITIALIZED; static long _WAKING_ = TASK_STATE_UNINITIALIZED; static long _NONINTERACTIVE_ = TASK_STATE_UNINITIALIZED; static long _PARKED_ = TASK_STATE_UNINITIALIZED; static long _NOLOAD_ = TASK_STATE_UNINITIALIZED; #define valid_task_state(X) ((X) != TASK_STATE_UNINITIALIZED) static void dump_task_states(void) { int hi, lo; fprintf(fp, " RUNNING: %3ld (0x%lx)\n", _RUNNING_, _RUNNING_); fprintf(fp, " INTERRUPTIBLE: %3ld (0x%lx)\n", _INTERRUPTIBLE_, _INTERRUPTIBLE_); fprintf(fp, " UNINTERRUPTIBLE: %3ld (0x%lx)\n", _UNINTERRUPTIBLE_, _UNINTERRUPTIBLE_); fprintf(fp, " STOPPED: %3ld (0x%lx)\n", _STOPPED_, _STOPPED_); if (valid_task_state(_TRACING_STOPPED_)) { if (count_bits_long(_TRACING_STOPPED_) > 1) { lo = lowest_bit_long(_TRACING_STOPPED_); hi = highest_bit_long(_TRACING_STOPPED_); fprintf(fp, " TRACING_STOPPED: %3d and %d (0x%x and 0x%x)\n", 1< 1) { lo = lowest_bit_long(_DEAD_); hi = highest_bit_long(_DEAD_); fprintf(fp, " DEAD: %3d and %d (0x%x and 0x%x)\n", 1< 0) && read_string(symbol_value("stat_nam"), buf, BUFSIZE-1) && ascii_string(buf) && (strlen(buf) > strlen("RSDTtZX"))) { for (i = 0; i < strlen(buf); i++) { switch (buf[i]) { case 'R': _RUNNING_ = i; break; case 'S': _INTERRUPTIBLE_ = i; break; case 'D': _UNINTERRUPTIBLE_ = (1 << (i-1)); break; case 'T': _STOPPED_ = (1 << (i-1)); break; case 't': _TRACING_STOPPED_ = (1 << (i-1)); break; case 'X': if (_DEAD_ == UNINITIALIZED) _DEAD_ = (1 << 
(i-1)); else _DEAD_ |= (1 << (i-1)); break; case 'Z': _ZOMBIE_ = (1 << (i-1)); break; case 'x': if (_DEAD_ == UNINITIALIZED) _DEAD_ = (1 << (i-1)); else _DEAD_ |= (1 << (i-1)); break; case 'K': _WAKEKILL_ = (1 << (i-1)); break; case 'W': _WAKING_ = (1 << (i-1)); break; case 'P': _PARKED_ = (1 << (i-1)); break; case 'N': _NOLOAD_ = (1 << (i-1)); break; } } goto done_states; } if ((len = get_array_length("task_state_array", NULL, 0)) <= 0) goto old_defaults; bitpos = 0; for (i = 0; i < len; i++) { if (!read_string(str, buf, BUFSIZE-1)) break; if (CRASHDEBUG(3)) fprintf(fp, "%s%s[%d][%s]\n", bitpos ? "" : "\n", i < 10 ? " " : "", i, buf); if (strstr(buf, "(running)")) _RUNNING_ = bitpos; else if (strstr(buf, "(sleeping)")) _INTERRUPTIBLE_ = bitpos; else if (strstr(buf, "(disk sleep)")) _UNINTERRUPTIBLE_ = bitpos; else if (strstr(buf, "(stopped)")) _STOPPED_ = bitpos; else if (strstr(buf, "(zombie)")) _ZOMBIE_ = bitpos; else if (strstr(buf, "(dead)")) { if (_DEAD_ == TASK_STATE_UNINITIALIZED) _DEAD_ = bitpos; else _DEAD_ |= bitpos; } else if (strstr(buf, "(swapping)")) /* non-existent? 
*/ _SWAPPING_ = bitpos; else if (strstr(buf, "(tracing stop)")) { if (_TRACING_STOPPED_ == TASK_STATE_UNINITIALIZED) _TRACING_STOPPED_ = bitpos; else _TRACING_STOPPED_ |= bitpos; } else if (strstr(buf, "(wakekill)")) _WAKEKILL_ = bitpos; else if (strstr(buf, "(waking)")) _WAKING_ = bitpos; else if (strstr(buf, "(parked)")) _PARKED_ = bitpos; if (!bitpos) bitpos = 1; else bitpos = bitpos << 1; task_state_array += sizeof(void *); if (!readmem(task_state_array, KVADDR, &str, sizeof(void *), "task_state_array", RETURN_ON_ERROR)) break; } if ((THIS_KERNEL_VERSION >= LINUX(2,6,16)) && (THIS_KERNEL_VERSION < LINUX(2,6,24))) { _NONINTERACTIVE_ = 64; } if (THIS_KERNEL_VERSION >= LINUX(2,6,32)) { /* * Account for states not listed in task_state_array[] */ if (count_bits_long(_DEAD_) == 1) { bitpos = 1<< lowest_bit_long(_DEAD_); _DEAD_ |= (bitpos<<1); /* TASK_DEAD */ _WAKEKILL_ = (bitpos<<2); /* TASK_WAKEKILL */ _WAKING_ = (bitpos<<3); /* TASK_WAKING */ } } done_states: if (CRASHDEBUG(3)) dump_task_states(); if (!valid_task_state(_RUNNING_) || !valid_task_state(_INTERRUPTIBLE_) || !valid_task_state(_UNINTERRUPTIBLE_) || !valid_task_state(_ZOMBIE_) || !valid_task_state(_STOPPED_)) { if (CRASHDEBUG(3)) fprintf(fp, "initialize_task_state: using old defaults\n"); goto old_defaults; } } /* * Print multiple state strings if appropriate. */ static char * task_state_string_verbose(ulong task, char *buf) { long state, both; int count; state = task_state(task); buf[0] = NULLCHAR; count = 0; if (state == _RUNNING_) { sprintf(buf, "TASK_RUNNING"); return buf; } if (state & _INTERRUPTIBLE_) sprintf(&buf[strlen(buf)], "%sTASK_INTERRUPTIBLE", count++ ? "|" : ""); if (state & _UNINTERRUPTIBLE_) sprintf(&buf[strlen(buf)], "%sTASK_UNINTERRUPTIBLE", count++ ? "|" : ""); if (state & _STOPPED_) sprintf(&buf[strlen(buf)], "%sTASK_STOPPED", count++ ? "|" : ""); if (state & _TRACING_STOPPED_) sprintf(&buf[strlen(buf)], "%sTASK_TRACED", count++ ? 
			"|" : "");

	/*
	 * _DEAD_ may encode both EXIT_DEAD and TASK_DEAD bits; report
	 * both names when more than one of its bits is set.
	 */
	if ((both = (state & _DEAD_))) {
		if (count_bits_long(both) > 1)
			sprintf(&buf[strlen(buf)], "%sEXIT_DEAD|TASK_DEAD",
				count++ ? "|" : "");
		else
			sprintf(&buf[strlen(buf)], "%sEXIT_DEAD",
				count++ ? "|" : "");
	}

	if (state & _ZOMBIE_)
		sprintf(&buf[strlen(buf)], "%sEXIT_ZOMBIE",
			count++ ? "|" : "");

	if (valid_task_state(_WAKING_) && (state & _WAKING_))
		sprintf(&buf[strlen(buf)], "%sTASK_WAKING",
			count++ ? "|" : "");

	if (valid_task_state(_WAKEKILL_) && (state & _WAKEKILL_))
		sprintf(&buf[strlen(buf)], "%sTASK_WAKEKILL",
			count++ ? "|" : "");

	if (valid_task_state(_NOLOAD_) && (state & _NOLOAD_))
		sprintf(&buf[strlen(buf)], "%sTASK_NOLOAD",
			count++ ? "|" : "");

	if (valid_task_state(_NONINTERACTIVE_) &&
	    (state & _NONINTERACTIVE_))
		sprintf(&buf[strlen(buf)], "%sTASK_NONINTERACTIVE",
			count++ ? "|" : "");

	/* TASK_PARKED is an exact-value state, not a bit; it overrides. */
	if (state == _PARKED_) {
		sprintf(buf, "TASK_PARKED");
		return buf;
	}

	return buf;
}

/*
 * Return a task's state as a string in buf: a long "A|B" form when
 * verbose, otherwise a two-letter abbreviation ("RU", "IN", "UN", ...).
 * Initializes the per-kernel state values on first use.
 */
char *
task_state_string(ulong task, char *buf, int verbose)
{
	long state;
	int exclusive;
	int valid, set;

	if (_RUNNING_ == TASK_STATE_UNINITIALIZED)
		initialize_task_state();

	if (verbose)
		return task_state_string_verbose(task, buf);

	if (buf)
		/*
		 * NOTE(review): verbose is always 0 here (the verbose case
		 * returned above), so this default is always "??".
		 */
		sprintf(buf, verbose ? "(unknown)" : "??");

	state = task_state(task);

	set = valid = exclusive = 0;
	if (valid_task_state(_EXCLUSIVE_)) {
		exclusive = state & _EXCLUSIVE_;
		state &= ~(_EXCLUSIVE_);
	}

	/* Exact-value match: TASK_RUNNING is 0, not a bit. */
	if (state == _RUNNING_) {
		sprintf(buf, "RU");
		valid++;
	}

	if (state & _INTERRUPTIBLE_) {
		sprintf(buf, "IN");
		valid++;
		set++;
	}

	if (state & _UNINTERRUPTIBLE_) {
		sprintf(buf, "UN");
		valid++;
		set++;
	}

	if (state & _ZOMBIE_) {
		sprintf(buf, "ZO");
		valid++;
		set++;
	}

	if (state & _STOPPED_) {
		sprintf(buf, "ST");
		valid++;
		set++;
	}

	if (valid_task_state(_TRACING_STOPPED_) &&
	    (state & _TRACING_STOPPED_)) {
		sprintf(buf, "TR");
		valid++;
		set++;
	}

	if (state == _SWAPPING_) {
		sprintf(buf, "SW");
		valid++;
		set++;
	}

	/* Only report DEAD when no primary state bit was recognized. */
	if ((state & _DEAD_) && !set) {
		sprintf(buf, "DE");
		valid++;
		set++;
	}

	if (state == _PARKED_) {
		sprintf(buf, "PA");
		valid++;
	}

	if (state == _WAKING_) {
		sprintf(buf, "WA");
		valid++;
	}

	if (valid && exclusive)
		strcat(buf, "EX");

	return buf;
}

/*
 * Return a task's state and exit_state together.
 */
ulong
task_state(ulong task)
{
	ulong state, exit_state;

	fill_task_struct(task);

	if (!tt->last_task_read)
		return 0;

	state = ULONG(tt->task_struct + OFFSET(task_struct_state));
	exit_state = VALID_MEMBER(task_struct_exit_state) ?
		ULONG(tt->task_struct + OFFSET(task_struct_exit_state)) : 0;

	return (state | exit_state);
}

/*
 * Return a task's flags.
 */
ulong
task_flags(ulong task)
{
	ulong flags;

	fill_task_struct(task);

	flags = tt->last_task_read ?
		 ULONG(tt->task_struct + OFFSET(task_struct_flags)) : 0;

	return flags;
}

/*
 * Return a task's tgid.
 */
ulong
task_tgid(ulong task)
{
	uint tgid;

	fill_task_struct(task);

	tgid = tt->last_task_read ?
		 UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0;

	return (ulong)tgid;
}

/*
 * Return the task's last-run timestamp, taken from whichever of the
 * last_run, timestamp, or sched_info.last_arrival members this kernel
 * provides; 0 if the task_struct could not be read.
 */
ulonglong
task_last_run(ulong task)
{
	ulong last_run;
	ulonglong timestamp;

	timestamp = 0;
	fill_task_struct(task);

	if (VALID_MEMBER(task_struct_last_run)) {
		last_run = tt->last_task_read ?
			ULONG(tt->task_struct +
			OFFSET(task_struct_last_run)) : 0;
		timestamp = (ulonglong)last_run;
	} else if (VALID_MEMBER(task_struct_timestamp))
		timestamp = tt->last_task_read ?
			ULONGLONG(tt->task_struct +
			OFFSET(task_struct_timestamp)) : 0;
	else if (VALID_MEMBER(sched_info_last_arrival))
		timestamp = tt->last_task_read ?
			ULONGLONG(tt->task_struct +
			OFFSET(task_struct_sched_info) +
			OFFSET(sched_info_last_arrival)) : 0;

	return timestamp;
}

/*
 * Return a task's mm_struct address.  If "fill" is set, the mm_struct
 * cache is loaded.
 */
ulong
task_mm(ulong task, int fill)
{
	ulong mm_struct;

	fill_task_struct(task);

	if (!tt->last_task_read)
		return 0;

	mm_struct = ULONG(tt->task_struct + OFFSET(task_struct_mm));

	if (fill && mm_struct)
		fill_mm_struct(mm_struct);

	return mm_struct;
}

/*
 * Translate a processor number into a string, taking NO_PROC_ID into account.
 */
char *
task_cpu(int processor, char *buf, int verbose)
{
	if (processor < NR_CPUS)
		sprintf(buf, "%d", processor);
	else
		sprintf(buf, verbose ? "(unknown)" : "?");

	return buf;
}

/*
 * Check either the panic_threads[] array on a dump, or the has_cpu flag
 * of a task_struct on a live system.  Also account for deprecation of
 * usage of has_cpu on non-SMP systems.
 */
int
is_task_active(ulong task)
{
	int has_cpu;

	if (DUMPFILE() && is_panic_thread(task))
		return TRUE;

	fill_task_struct(task);

	has_cpu = tt->last_task_read ?
		task_has_cpu(task, tt->task_struct) : 0;

	/*
	 * On a live non-SMP system, the current crash session's own task
	 * is by definition active even without a has_cpu indication.
	 */
	if (!(kt->flags & SMP) && !has_cpu && ACTIVE() &&
	    (task == tt->this_task))
		has_cpu = TRUE;

	return(has_cpu);
}

/*
 * Return true if a task is the panic_task or is contained within the
 * panic_threads[] array.
 */
int
is_panic_thread(ulong task)
{
	int i;

	if (DUMPFILE()) {
		if (tt->panic_task == task)
			return TRUE;

		for (i = 0; i < NR_CPUS; i++)
			if (tt->panic_threads[i] == task)
				return TRUE;
	}

	return FALSE;
}

/*
 * Depending upon the kernel, check the task_struct's has_cpu or cpus_runnable
 * field if either exist, or the global runqueues[].curr via get_active_set()
 * to determine whether a task is running on a cpu.
 */
static int
task_has_cpu(ulong task, char *local_task)
{
	int i, has_cpu;
	ulong cpus_runnable;

	if (DUMPFILE() && (task == tt->panic_task)) /* no need to continue */
		return TRUE;

	if (VALID_MEMBER(task_struct_has_cpu)) {
		/* Use the cached copy when available, else read from dump. */
		if (local_task)
			has_cpu = INT(local_task+OFFSET(task_struct_has_cpu));
		else if (!readmem((ulong)(task+OFFSET(task_struct_has_cpu)),
			KVADDR, &has_cpu, sizeof(int),
			"task_struct has_cpu", RETURN_ON_ERROR))
				has_cpu = FALSE;
	} else if (VALID_MEMBER(task_struct_cpus_runnable)) {
		/* cpus_runnable is ~0UL when the task is not on any cpu. */
		if (local_task)
			cpus_runnable = ULONG(local_task +
				OFFSET(task_struct_cpus_runnable));
		else if (!readmem((ulong)(task +
			OFFSET(task_struct_cpus_runnable)),
			KVADDR, &cpus_runnable, sizeof(ulong),
			"task_struct cpus_runnable", RETURN_ON_ERROR))
				cpus_runnable = ~0UL;
		has_cpu = (cpus_runnable != ~0UL);
	} else if (get_active_set()) {
		for (i = 0, has_cpu = FALSE; i < NR_CPUS; i++) {
			if (task == tt->active_set[i]) {
				has_cpu = TRUE;
				break;
			}
		}
	} else
		error(FATAL,
		    "task_struct has no has_cpu, or cpus_runnable; runqueues[] not defined?\n");

	return has_cpu;
}

/*
 * If a task is in the panic_threads array and has an associated panic_ksp
 * array entry, return it.
 */
int
get_panic_ksp(struct bt_info *bt, ulong *ksp)
{
	int i;

	if (tt->flags & PANIC_KSP) {
		for (i = 0; i < NR_CPUS; i++) {
			if ((tt->panic_threads[i] == bt->task) &&
			    tt->panic_ksp[i] &&
			    INSTACK(tt->panic_ksp[i], bt)) {
				*ksp = tt->panic_ksp[i];
				return TRUE;
			}
		}
	}
	return FALSE;
}

/*
 * Look for kcore's storage information for the system's panic state.
 * If it's not there (somebody else's dump format?), look through all the
 * stack traces for evidence of panic.
 */
static ulong
get_panic_context(void)
{
	int i;
	struct task_context *tc;
	ulong panic_threads_addr;
	ulong task;
	char *tp;

	/*
	 * Make sure every cpu's active task is in the context table,
	 * adding any that the PID hash walk missed.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(task = tt->active_set[i]))
			continue;

		if (!task_exists(task)) {
			error(WARNING,
			    "active task %lx on cpu %d not found in PID hash\n\n",
				task, i);
			if ((tp = fill_task_struct(task))) {
				if ((tc = store_context(NULL, task, tp)))
					tt->running_tasks++;
				else
					continue;
			}
		}
	}

	/*
	 * --no_panic command line option
	 */
	if (tt->flags & PANIC_TASK_NOT_FOUND)
		goto use_task_0;

	tt->panic_processor = -1;
	task = NO_TASK;
	tc = FIRST_CONTEXT();

	if (symbol_exists("panic_threads") &&
	    symbol_exists("panicmsg") &&
	    symbol_exists("panic_processor")) {
		panic_threads_addr = symbol_value("panic_threads");
		get_symbol_data("panic_processor", sizeof(int),
			&tt->panic_processor);
		get_symbol_data("panicmsg", sizeof(char *), &tt->panicmsg);

		if (!readmem(panic_threads_addr, KVADDR, tt->panic_threads,
		    sizeof(void *)*NR_CPUS, "panic_processor array",
		    RETURN_ON_ERROR))
			goto use_task_0;

		task = tt->panic_threads[tt->panic_processor];

		if (symbol_exists("panic_ksp")) {
			if (!(tt->panic_ksp = (ulong *)
			    calloc(NR_CPUS, sizeof(void *))))
				error(FATAL,
				    "cannot malloc panic_ksp array.\n");
			readmem(symbol_value("panic_ksp"), KVADDR,
			    tt->panic_ksp,
			    sizeof(void *)*NR_CPUS, "panic_ksp array",
			    RETURN_ON_ERROR);
			tt->flags |= PANIC_KSP;
		}

		if (machdep->flags & HWRESET) {
			populate_panic_threads();
			task = tt->panic_threads[0];
		}
	}

	if (task && task_exists(task))
		return(tt->panic_task = task);

	if (task)
		error(INFO, "reported panic task %lx does not exist!\n\n",
			task);

	/* No recorded panic state: scan the stacks for panic evidence. */
	if ((tc = panic_search())) {
		tt->panic_processor = tc->processor;
		return(tt->panic_task = tc->task);
	}

use_task_0:
	if (CRASHDEBUG(1))
		error(INFO, "get_panic_context: panic task not found\n");

	tt->flags |= PANIC_TASK_NOT_FOUND;
	tc = FIRST_CONTEXT();
	return(tc->task);
}

/*
 * Get the active task on a cpu --
 * from a dumpfile only.
 */
ulong
get_active_task(int cpu)
{
	int i;
	ulong task;
	struct task_context *tc;

	if (DUMPFILE() && (task = tt->panic_threads[cpu]))
		return task;

	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		if ((tc->processor == cpu) && is_task_active(tc->task))
			return(tc->task);
	}

	return NO_TASK;
}

/*
 * Read the panic string into buf.  Tries the recorded panicmsg (or the
 * LKCD equivalent) first; failing that, scans the kernel log for known
 * panic/oops/SysRq signatures, in decreasing order of specificity.
 */
char *
get_panicmsg(char *buf)
{
	int msg_found;

	BZERO(buf, BUFSIZE);
	msg_found = FALSE;

	if (tt->panicmsg) {
		read_string(tt->panicmsg, buf, BUFSIZE-1);
		msg_found = TRUE;
	} else if (LKCD_DUMPFILE()) {
		get_lkcd_panicmsg(buf);
		msg_found = TRUE;
	}

	if (msg_found == TRUE)
		return(buf);

	open_tmpfile();
	dump_log(SHOW_LOG_TEXT);

	/*
	 * First check for a SYSRQ-generated crash, and set the
	 * active-task flag appropriately.  The message may or
	 * may not be used as the panic message.
	 */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "SysRq : Crash") ||
		    strstr(buf, "SysRq : Trigger a crash")) {
			pc->flags |= SYSRQ;
			break;
		}
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "general protection fault: ") ||
		    strstr(buf, "double fault: ") ||
		    strstr(buf, "divide error: ") ||
		    strstr(buf, "stack segment: ")) {
			msg_found = TRUE;
			break;
		}
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "SysRq : Netdump") ||
		    strstr(buf, "SysRq : Crash") ||
		    strstr(buf, "SysRq : Trigger a crash")) {
			pc->flags |= SYSRQ;
			msg_found = TRUE;
			break;
		}
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "Oops: ") ||
		    strstr(buf, "Kernel BUG at") ||
		    strstr(buf, "kernel BUG at") ||
		    strstr(buf, "Unable to handle kernel paging request") ||
		    strstr(buf,
			"Unable to handle kernel NULL pointer dereference") ||
		    strstr(buf, "BUG: unable to handle kernel "))
			msg_found = TRUE;
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "sysrq") &&
		    symbol_exists("sysrq_pressed")) {
			get_symbol_data("sysrq_pressed", sizeof(int),
				&msg_found);
			break;
		}
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "Kernel panic: ") ||
		    strstr(buf, "Kernel panic - ")) {
			msg_found = TRUE;
			break;
		}
	}

	rewind(pc->tmpfile);
	while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "[Hardware Error]: ")) {
			msg_found = TRUE;
			break;
		}
	}

	close_tmpfile();

	if (!msg_found)
		BZERO(buf, BUFSIZE);

	return(buf);
}

/*
 * This command allows the running of a set of commands on any or all
 * tasks running on a system.  The target tasks may be designated by
 * pid, task or command name.  The available command set is designated by
 * the FOREACH_xxx definitions below.  If a running command name string
 * conflicts with a foreach command, the command name string may be
 * prefixed with a \ character.
 */
void
cmd_foreach(void)
{
	int a, c, k, t, p;
	ulong value;
	static struct foreach_data foreach_data;
	struct foreach_data *fd;
	struct task_context *tc;
	char *p1;
	int key;

	BZERO(&foreach_data, sizeof(struct foreach_data));
	fd = &foreach_data;

	/* Gather the option flags for the per-task command. */
	while ((c = getopt(argcnt, args,
	    "R:vomlgersStTpukcfFxhdaG")) != EOF) {
		switch(c)
		{
		case 'R':
			fd->reference = optarg;
			break;

		case 'h':
		case 'x':
			fd->flags |= FOREACH_x_FLAG;
			break;

		case 'd':
			fd->flags |= FOREACH_d_FLAG;
			break;

		case 'v':
			fd->flags |= FOREACH_v_FLAG;
			break;

		case 'm':
			fd->flags |= FOREACH_m_FLAG;
			break;

		case 'l':
			fd->flags |= FOREACH_l_FLAG;
			break;

		case 'o':
			fd->flags |= FOREACH_o_FLAG;
			break;

		case 'g':
			fd->flags |= FOREACH_g_FLAG;
			break;

		case 'e':
			fd->flags |= FOREACH_e_FLAG;
			break;

		case 's':
			fd->flags |= FOREACH_s_FLAG;
			break;

		case 'S':
			fd->flags |= FOREACH_S_FLAG;
			break;

		case 'r':
			fd->flags |= FOREACH_r_FLAG;
			break;

		case 'T':
			fd->flags |= FOREACH_T_FLAG;
			break;

		case 't':
			fd->flags |= FOREACH_t_FLAG;
			break;

		case 'p':
			fd->flags |= FOREACH_p_FLAG;
			break;

		case 'u':
			fd->flags |= FOREACH_u_FLAG;
			break;

		case 'k':
			fd->flags |= FOREACH_k_FLAG;
			break;

		case 'c':
			fd->flags |= FOREACH_c_FLAG;
			break;

		case 'f':
			fd->flags |= FOREACH_f_FLAG;
			break;

		case 'F':
			/* A second -F escalates to the FLAG2 variant. */
			if (fd->flags & FOREACH_F_FLAG)
				fd->flags |= FOREACH_F_FLAG2;
			else
				fd->flags |= FOREACH_F_FLAG;
			break;

		case 'a':
			fd->flags |= FOREACH_a_FLAG;
			break;

		case 'G':
			fd->flags |= FOREACH_G_FLAG;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	a = c = k = t = p = 0;

	while (args[optind]) {
		/*
		 * Once a keyword has been entered, then only accept
		 * command arguments.
		 */
		if (k) {
			p1 = args[optind];
			goto command_argument;
		}

		/*
		 * If it's a keyword, grab it and check no further.
		 */
		if (is_foreach_keyword(args[optind], &key)) {
			if (k == MAX_FOREACH_KEYWORDS)
				error(INFO, "too many keywords!\n");
			else
				fd->keyword_array[k++] = key;
			optind++;
			continue;
		}

		/*
		 * If it's a task pointer or pid, take it.
		 */
		if (IS_A_NUMBER(args[optind])) {

			/*
			 * "DE" is both a hex number and a task state
			 * abbreviation; refuse it when ambiguous.
			 */
			if (STREQ(args[optind], "DE") && pid_exists(0xde)) {
				error(INFO,
				    "ambiguous task-identifying argument: %s\n",
					args[optind]);
				error(CONT,
				    "for a \"state\" argument, use: \\DE\n");
				error(CONT,
				    "for a \"pid\" argument, use: 0xDE, 0xde, de or 222\n\n");
				cmd_usage(pc->curcmd, SYNOPSIS);
				return;
			}

			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
				if (p == MAX_FOREACH_PIDS)
					error(INFO,
					    "too many pids specified!\n");
				else {
					fd->pid_array[p++] = value;
					fd->flags |= FOREACH_SPECIFIED;
				}
				optind++;
				continue;

			case STR_TASK:
				if (t == MAX_FOREACH_TASKS)
					error(INFO,
					    "too many tasks specified!\n");
				else {
					fd->task_array[t++] = value;
					fd->flags |= FOREACH_SPECIFIED;
				}
				optind++;
				continue;

			case STR_INVALID:
				break;
			}
		}

		/*
		 * Select all kernel threads.
		 */
		if (STREQ(args[optind], "kernel")) {
			if (fd->flags & FOREACH_USER)
				error(FATAL,
				    "user and kernel are mutually exclusive!\n");
			fd->flags |= FOREACH_KERNEL;
			optind++;
			continue;
		}

		/* Strip the escaping backslash off a literal "DE" state. */
		if ((args[optind][0] == '\\') &&
		    STREQ(&args[optind][1], "DE"))
			shift_string_left(args[optind], 1);

		if (STREQ(args[optind], "RU") || STREQ(args[optind], "IN") ||
		    STREQ(args[optind], "UN") || STREQ(args[optind], "ST") ||
		    STREQ(args[optind], "TR") || STREQ(args[optind], "ZO") ||
		    STREQ(args[optind], "DE") || STREQ(args[optind], "PA") ||
		    STREQ(args[optind], "WA") || STREQ(args[optind], "SW")) {

			if (fd->flags & FOREACH_STATE)
				error(FATAL, "only one task state allowed\n");

			if (STREQ(args[optind], "RU"))
				fd->state = _RUNNING_;
			else if (STREQ(args[optind], "IN"))
				fd->state = _INTERRUPTIBLE_;
			else if (STREQ(args[optind], "UN"))
				fd->state = _UNINTERRUPTIBLE_;
			else if (STREQ(args[optind], "ST"))
				fd->state = _STOPPED_;
			else if (STREQ(args[optind], "TR"))
				fd->state = _TRACING_STOPPED_;
			else if (STREQ(args[optind], "ZO"))
				fd->state = _ZOMBIE_;
			else if (STREQ(args[optind], "DE"))
				fd->state = _DEAD_;
			else if (STREQ(args[optind], "SW"))
				fd->state = _SWAPPING_;
			else if (STREQ(args[optind], "PA"))
				fd->state = _PARKED_;
			else if (STREQ(args[optind], "WA"))
				fd->state = _WAKING_;

			if (fd->state == TASK_STATE_UNINITIALIZED)
				error(FATAL,
				    "invalid task state for this kernel: %s\n",
					args[optind]);

			fd->flags |= FOREACH_STATE;
			optind++;
			continue;
		}

		/*
		 * Select only user threads.
		 */
		if (STREQ(args[optind], "user")) {
			if (fd->flags & FOREACH_KERNEL)
				error(FATAL,
				    "user and kernel are mutually exclusive!\n");
			fd->flags |= FOREACH_USER;
			optind++;
			continue;
		}

		/*
		 * Select only active tasks (dumpfile only)
		 */
		if (STREQ(args[optind], "active")) {
			if (!DUMPFILE())
				error(FATAL,
				    "active option not allowed on live systems\n");
			fd->flags |= FOREACH_ACTIVE;
			optind++;
			continue;
		}

		/*
		 * Regular expression is enclosed within "'" character.
		 * The args[optind] string may not be modified, so a copy
		 * is duplicated.
		 */
		if (SINGLE_QUOTED_STRING(args[optind])) {
			if (fd->regexs == MAX_REGEX_ARGS)
				error(INFO,
				    "too many expressions specified!\n");
			else {
				p1 = strdup(&args[optind][1]);
				LASTCHAR(p1) = NULLCHAR;

				if (regcomp(&fd->regex_info[fd->regexs].regex,
				    p1, REG_EXTENDED|REG_NOSUB)) {
					error(INFO,
					    "invalid regular expression: %s\n",
						p1);
					free(p1);
					goto bailout;
				}

				fd->regex_info[fd->regexs].pattern = p1;
				/* First regex: register cleanup handler. */
				if (fd->regexs++ == 0) {
					pc->cmd_cleanup_arg = (void *)fd;
					pc->cmd_cleanup = foreach_cleanup;
				}
			}
			optind++;
			continue;
		}

		/*
		 * If it's a command name, prefixed or otherwise, take it.
		 */
		p1 = (args[optind][0] == '\\') ?
			&args[optind][1] : args[optind];

		if (comm_exists(p1)) {
			if (c == MAX_FOREACH_COMMS)
				error(INFO, "too many commands specified!\n");
			else {
				fd->comm_array[c++] = p1;
				fd->flags |= FOREACH_SPECIFIED;
			}
			optind++;
			continue;
		}
command_argument:
		/*
		 * If no keyword has been entered, we don't know what this
		 * is -- most likely it's a bogus command specifier.  We set
		 * FOREACH_SPECIFIED in case it was a bad specifier and no
		 * other task selectors exist -- which in turn would causes
		 * the command to be erroneously run on all tasks.
		 */
		if (!k) {
			fd->flags |= FOREACH_SPECIFIED;
			error(INFO, "unknown argument: \"%s\"\n",
				args[optind]);
			optind++;
			continue;
		}

		/*
		 * Must be an command argument -- so store it and let
		 * the command deal with it...
		 */
		if (a == MAX_FOREACH_ARGS)
			error(INFO, "too many arguments specified!\n");
		else
			fd->arg_array[a++] = (ulong)p1;

		optind++;
	}

	fd->flags |= FOREACH_CMD;
	fd->pids = p;
	fd->keys = k;
	fd->comms = c;
	fd->tasks = t;
	fd->args = a;

	if (fd->keys)
		foreach(fd);
	else
		error(INFO, "no keywords specified\n");
bailout:
	foreach_cleanup((void *)fd);
}

/*
 * Do the work for cmd_foreach().
*/ void foreach(struct foreach_data *fd) { int i, j, k, a; struct task_context *tc, *tgc; int specified; int doit; int subsequent; unsigned int radix; ulong cmdflags; ulong tgid; struct reference reference, *ref; int print_header; struct bt_info bt_info, *bt; char buf[TASK_COMM_LEN]; struct psinfo psinfo; /* * Filter out any command/option issues. */ if (CRASHDEBUG(1)) { fprintf(fp, " flags: %lx\n", fd->flags); fprintf(fp, " task_array: %s", fd->tasks ? "" : "(none)"); for (j = 0; j < fd->tasks; j++) fprintf(fp, "[%lx] ", fd->task_array[j]); fprintf(fp, "\n"); fprintf(fp, " pid_array: %s", fd->pids ? "" : "(none)"); for (j = 0; j < fd->pids; j++) fprintf(fp, "[%ld] ", fd->pid_array[j]); fprintf(fp, "\n"); fprintf(fp, " comm_array: %s", fd->comms ? "" : "(none)"); for (j = 0; j < fd->comms; j++) fprintf(fp, "[%s] ", fd->comm_array[j]); fprintf(fp, "\n"); fprintf(fp, " regex_info: %s", fd->regexs ? "" : "(none)\n"); for (j = 0; j < fd->regexs; j++) { fprintf(fp, "%s[%d] pattern: [%s] ", j ? " " : "", j, fd->regex_info[j].pattern); fprintf(fp, "regex: [%lx]\n", (ulong)&fd->regex_info[j].regex); } fprintf(fp, "\n"); fprintf(fp, "keyword_array: %s", fd->keys ? "" : "(none)"); for (k = 0; k < fd->keys; k++) fprintf(fp, "[%d] ", fd->keyword_array[k]); fprintf(fp, "\n"); fprintf(fp, " arg_array: %s", fd->args ? "" : "(none)"); for (a = 0; a < fd->args; a++) fprintf(fp, "[%lx (%s)] ", fd->arg_array[a], (char *)fd->arg_array[a]); fprintf(fp, "\n"); fprintf(fp, " reference: \"%s\"\n", fd->reference ? 
fd->reference : ""); } print_header = TRUE; bt = NULL; for (k = 0; k < fd->keys; k++) { switch(fd->keyword_array[k]) { case FOREACH_NET: switch (fd->flags & (FOREACH_s_FLAG|FOREACH_S_FLAG)) { case (FOREACH_s_FLAG|FOREACH_S_FLAG): error(WARNING, "net -s and -S options are mutually exclusive!\n"); fd->flags = FOREACH_s_FLAG; break; case 0: error(WARNING, "net command requires -s or -S option\n\n"); fd->flags |= FOREACH_s_FLAG; break; } if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "net: -x and -d options are mutually exclusive\n"); break; case FOREACH_VTOP: if (!fd->args) error(FATAL, "foreach command requires address argument\n"); if (fd->reference) error(FATAL, "vtop command does not support -R option\n"); if ((fd->flags & (FOREACH_u_FLAG|FOREACH_k_FLAG)) == (FOREACH_u_FLAG|FOREACH_k_FLAG)) error(FATAL, "vtop: -u and -k options are mutually exclusive\n"); break; case FOREACH_VM: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "vm: -x and -d options are mutually exclusive\n"); if (count_bits_long(fd->flags & (FOREACH_i_FLAG|FOREACH_p_FLAG| FOREACH_m_FLAG|FOREACH_v_FLAG)) > 1) error(FATAL, "vm command accepts only one of -p, -m or -v flags\n"); if (fd->reference) { if (fd->flags & FOREACH_i_FLAG) error(FATAL, "vm: -i is not applicable to the -R option\n"); if (fd->flags & FOREACH_m_FLAG) error(FATAL, "vm: -m is not applicable to the -R option\n"); if (fd->flags & FOREACH_v_FLAG) error(FATAL, "vm: -v is not applicable to the -R option\n"); } break; case FOREACH_BT: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "bt: -x and -d options are mutually exclusive\n"); if ((fd->flags & FOREACH_l_FLAG) && NO_LINE_NUMBERS()) { error(INFO, "line numbers are not available\n"); fd->flags &= ~FOREACH_l_FLAG; } #ifndef GDB_5_3 if ((fd->flags & FOREACH_g_FLAG)) error(FATAL, "bt -g option is not supported when issued from 
foreach\n"); #endif bt = &bt_info; break; case FOREACH_TASK: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "task: -x and -d options are mutually exclusive\n"); if (count_bits_long(fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) > 1) error(FATAL, "task command accepts -R member[,member]," " and either -x or -d flags\n"); break; case FOREACH_SET: if (fd->reference) error(FATAL, "set command does not support -R option\n"); break; case FOREACH_SIG: if (fd->flags & (FOREACH_l_FLAG|FOREACH_s_FLAG)) error(FATAL, "sig: -l and -s options are not applicable\n"); if (fd->flags & FOREACH_g_FLAG) { if (!hq_open()) { error(INFO, "cannot hash thread group tasks\n"); fd->flags &= ~FOREACH_g_FLAG; } else print_header = FALSE; } break; case FOREACH_PS: if (count_bits_long(fd->flags & FOREACH_PS_EXCLUSIVE) > 1) error(FATAL, ps_exclusive); if ((fd->flags & (FOREACH_l_FLAG|FOREACH_m_FLAG)) && (fd->flags & FOREACH_G_FLAG)) error(FATAL, "-G not supported with -%c option\n", fd->flags & FOREACH_l_FLAG ? 
'l' : 'm'); BZERO(&psinfo, sizeof(struct psinfo)); if (fd->flags & FOREACH_G_FLAG) { if (!hq_open()) { error(INFO, "cannot hash thread group tasks\n"); fd->flags &= ~FOREACH_G_FLAG; } } if (fd->flags & (FOREACH_l_FLAG|FOREACH_m_FLAG)) sort_context_array_by_last_run(); if ((fd->flags & FOREACH_m_FLAG) && INVALID_MEMBER(rq_timestamp)) option_not_supported('m'); print_header = FALSE; break; case FOREACH_FILES: if (fd->flags & FOREACH_p_FLAG) error(FATAL, "files command does not support -p option\n"); break; case FOREACH_TEST: break; } } subsequent = FALSE; specified = (fd->tasks || fd->pids || fd->comms || fd->regexs || (fd->flags & FOREACH_SPECIFIED)); ref = &reference; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { doit = FALSE; if ((fd->flags & FOREACH_ACTIVE) && !is_task_active(tc->task)) continue; if ((fd->flags & FOREACH_USER) && is_kernel_thread(tc->task)) continue; if ((fd->flags & FOREACH_KERNEL) && !is_kernel_thread(tc->task)) continue; if (fd->flags & FOREACH_STATE) { if (fd->state == _RUNNING_) { if (task_state(tc->task) != _RUNNING_) continue; } else if (!(task_state(tc->task) & fd->state)) continue; } if (specified) { for (j = 0; j < fd->tasks; j++) { if (fd->task_array[j] == tc->task) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->pids); j++) { if (fd->pid_array[j] == tc->pid) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->comms); j++) { strlcpy(buf, fd->comm_array[j], TASK_COMM_LEN); if (STREQ(buf, tc->comm)) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->regexs); j++) { if (regexec(&fd->regex_info[j].regex, tc->comm, 0, NULL, 0) == 0) { doit = TRUE; break; } } } else doit = TRUE; if (!doit) continue; if (output_closed() || received_SIGINT()) { free_all_bufs(); goto foreach_bailout; } if (setjmp(pc->foreach_loop_env)) { free_all_bufs(); continue; } pc->flags |= IN_FOREACH; if (fd->reference) { BZERO(ref, sizeof(struct reference)); ref->str = fd->reference; } else if (print_header) print_task_header(fp, 
tc, subsequent++); for (k = 0; k < fd->keys; k++) { free_all_bufs(); switch(fd->keyword_array[k]) { case FOREACH_BT: pc->curcmd = "bt"; BZERO(bt, sizeof(struct bt_info));; bt->task = tc->task; bt->tc = tc; bt->stackbase = GET_STACKBASE(tc->task); bt->stacktop = GET_STACKTOP(tc->task); if (fd->flags & FOREACH_r_FLAG) bt->flags |= BT_RAW; if (fd->flags & FOREACH_s_FLAG) bt->flags |= BT_SYMBOL_OFFSET; if (fd->flags & FOREACH_t_FLAG) bt->flags |= BT_TEXT_SYMBOLS; if (fd->flags & FOREACH_T_FLAG) { bt->flags |= BT_TEXT_SYMBOLS; bt->flags |= BT_TEXT_SYMBOLS_ALL; } if ((fd->flags & FOREACH_o_FLAG) || (kt->flags & USE_OLD_BT)) bt->flags |= BT_OLD_BACK_TRACE; if (fd->flags & FOREACH_e_FLAG) bt->flags |= BT_EFRAME_SEARCH; #ifdef GDB_5_3 if (fd->flags & FOREACH_g_FLAG) bt->flags |= BT_USE_GDB; #endif if (fd->flags & FOREACH_l_FLAG) bt->flags |= BT_LINE_NUMBERS; if (fd->flags & FOREACH_f_FLAG) bt->flags |= BT_FULL; if (fd->flags & FOREACH_F_FLAG) bt->flags |= (BT_FULL|BT_FULL_SYM_SLAB); if (fd->flags & FOREACH_F_FLAG2) bt->flags |= BT_FULL_SYM_SLAB2; if (fd->flags & FOREACH_x_FLAG) bt->radix = 16; if (fd->flags & FOREACH_d_FLAG) bt->radix = 10; if (fd->reference) bt->ref = ref; back_trace(bt); break; case FOREACH_VM: pc->curcmd = "vm"; cmdflags = 0; if (fd->flags & FOREACH_x_FLAG) cmdflags = PRINT_RADIX_16; else if (fd->flags & FOREACH_d_FLAG) cmdflags = PRINT_RADIX_10; if (fd->flags & FOREACH_i_FLAG) vm_area_dump(tc->task, PRINT_INODES, 0, NULL); else if (fd->flags & FOREACH_p_FLAG) vm_area_dump(tc->task, PHYSADDR, 0, fd->reference ? ref : NULL); else if (fd->flags & FOREACH_m_FLAG) vm_area_dump(tc->task, PRINT_MM_STRUCT|cmdflags, 0, NULL); else if (fd->flags & FOREACH_v_FLAG) vm_area_dump(tc->task, PRINT_VMA_STRUCTS|cmdflags, 0, NULL); else vm_area_dump(tc->task, 0, 0, fd->reference ? 
ref : NULL); break; case FOREACH_TASK: pc->curcmd = "task"; if (fd->flags & FOREACH_x_FLAG) radix = 16; else if (fd->flags & FOREACH_d_FLAG) radix = 10; else radix = pc->output_radix; do_task(tc->task, FOREACH_TASK, fd->reference ? ref : NULL, radix); break; case FOREACH_SIG: pc->curcmd = "sig"; if (fd->flags & FOREACH_g_FLAG) { tgid = task_tgid(tc->task); tgc = tgid_to_context(tgid); if (hq_enter(tgc->task)) do_sig_thread_group(tgc->task); } else do_sig(tc->task, FOREACH_SIG, fd->reference ? ref : NULL); break; case FOREACH_SET: pc->curcmd = "set"; show_context(tc); break; case FOREACH_PS: pc->curcmd = "ps"; psinfo.task[0] = tc->task; psinfo.pid[0] = NO_PID; psinfo.type[0] = PS_BY_TASK; psinfo.argc = 1; cmdflags = PS_BY_TASK; if (subsequent++) cmdflags |= PS_NO_HEADER; if (fd->flags & FOREACH_G_FLAG) cmdflags |= PS_GROUP; if (fd->flags & FOREACH_s_FLAG) cmdflags |= PS_KSTACKP; /* * mutually exclusive flags */ if (fd->flags & FOREACH_a_FLAG) cmdflags |= PS_ARGV_ENVP; else if (fd->flags & FOREACH_c_FLAG) cmdflags |= PS_CHILD_LIST; else if (fd->flags & FOREACH_p_FLAG) cmdflags |= PS_PPID_LIST; else if (fd->flags & FOREACH_t_FLAG) cmdflags |= PS_TIMES; else if (fd->flags & FOREACH_l_FLAG) cmdflags |= PS_LAST_RUN; else if (fd->flags & FOREACH_m_FLAG) cmdflags |= PS_MSECS; else if (fd->flags & FOREACH_r_FLAG) cmdflags |= PS_RLIMIT; else if (fd->flags & FOREACH_g_FLAG) cmdflags |= PS_TGID_LIST; show_ps(cmdflags, &psinfo); break; case FOREACH_FILES: pc->curcmd = "files"; cmdflags = 0; if (fd->flags & FOREACH_i_FLAG) cmdflags |= PRINT_INODES; if (fd->flags & FOREACH_c_FLAG) cmdflags |= PRINT_NRPAGES; open_files_dump(tc->task, cmdflags, fd->reference ? ref : NULL); break; case FOREACH_NET: pc->curcmd = "net"; if (fd->flags & (FOREACH_s_FLAG|FOREACH_S_FLAG)) dump_sockets_workhorse(tc->task, fd->flags, fd->reference ? 
ref : NULL); break; case FOREACH_VTOP: pc->curcmd = "vtop"; cmdflags = 0; if (fd->flags & FOREACH_c_FLAG) cmdflags |= USE_USER_PGD; if (fd->flags & FOREACH_u_FLAG) cmdflags |= UVADDR; if (fd->flags & FOREACH_k_FLAG) cmdflags |= KVADDR; for (a = 0; a < fd->args; a++) { do_vtop(htol((char *)fd->arg_array[a], FAULT_ON_ERROR, NULL), tc, cmdflags); } break; case FOREACH_TEST: pc->curcmd = "test"; foreach_test(tc->task, 0); break; } pc->curcmd = "foreach"; } } /* * Post-process any commands requiring it. */ for (k = 0; k < fd->keys; k++) { switch(fd->keyword_array[k]) { case FOREACH_SIG: if (fd->flags & FOREACH_g_FLAG) hq_close(); break; } } foreach_bailout: pc->flags &= ~IN_FOREACH; } /* * Clean up regex buffers and pattern strings. */ static void foreach_cleanup(void *arg) { int i; struct foreach_data *fd; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; fd = (struct foreach_data *)arg; for (i = 0; i < fd->regexs; i++) { regfree(&fd->regex_info[i].regex); free(fd->regex_info[i].pattern); } } /* * The currently available set of foreach commands. */ static int is_foreach_keyword(char *s, int *key) { if (STREQ(args[optind], "bt")) { *key = FOREACH_BT; return TRUE; } if (STREQ(args[optind], "vm")) { *key = FOREACH_VM; return TRUE; } if (STREQ(args[optind], "task")) { *key = FOREACH_TASK; return TRUE; } if (STREQ(args[optind], "set")) { *key = FOREACH_SET; return TRUE; } if (STREQ(args[optind], "files")) { *key = FOREACH_FILES; return TRUE; } if (STREQ(args[optind], "net")) { *key = FOREACH_NET; return TRUE; } if (STREQ(args[optind], "vtop")) { *key = FOREACH_VTOP; return TRUE; } if (STREQ(args[optind], "sig")) { *key = FOREACH_SIG; return TRUE; } if (STREQ(args[optind], "test")) { *key = FOREACH_TEST; return TRUE; } if (STREQ(args[optind], "ps")) { *key = FOREACH_PS; return TRUE; } return FALSE; } /* * Try the dumpfile-specific manner of finding the panic task first. 
If
 *  that fails, find the panic task the hard way -- do a "foreach bt" in the
 *  background, and look for the only one that has "panic" embedded in it.
 */
static struct task_context *
panic_search(void)
{
	struct foreach_data foreach_data, *fd;
	char *p1, *p2, *tp;
	ulong lasttask, dietask, found;
	char buf[BUFSIZE];
	struct task_context *tc;

	/* Fast path: ask the dumpfile format handler directly. */
	if ((lasttask = get_dumpfile_panic_task())) {
		found = TRUE;
		goto found_panic_task;
	}

	if (pc->flags2 & LIVE_DUMP)
		return NULL;

	/*
	 *  Run a background "foreach bt" into a tmpfile and scan its
	 *  output for panic/crash_kexec/die callers.
	 */
	BZERO(&foreach_data, sizeof(struct foreach_data));
	fd = &foreach_data;
	fd->keys = 1;
	fd->keyword_array[0] = FOREACH_BT;
	if (machine_type("S390X"))
		fd->flags |= FOREACH_o_FLAG;
	else
		fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG);

	dietask = lasttask = NO_TASK;
	found = FALSE;

	open_tmpfile();

	foreach(fd);

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		/* Track the task whose backtrace is currently being read. */
		if ((p1 = strstr(buf, " TASK: "))) {
			p1 += strlen(" TASK: ");
			p2 = p1;
			while (!whitespace(*p2))
				p2++;
			*p2 = NULLCHAR;
			lasttask = htol(p1, RETURN_ON_ERROR, NULL);
		}

		if (strstr(buf, " panic at ")) {
			found = TRUE;
			break;
		}

		if (strstr(buf, " crash_kexec at ") ||
		    strstr(buf, " .crash_kexec at ")) {
			found = TRUE;
			break;
		}

		/*
		 *  "die" is only conclusive if exactly one active task
		 *  called it; NO_TASK+1 marks an ambiguous multiple hit.
		 */
		if (strstr(buf, " die at ")) {
			switch (dietask)
			{
			case NO_TASK:
				dietask = lasttask;
				break;
			default:
				if (dietask != lasttask)
					dietask = NO_TASK+1;
				break;
			}
		}
	}

	close_tmpfile();

	if (!found && (dietask > (NO_TASK+1)) && task_has_cpu(dietask, NULL)) {
		lasttask = dietask;
		found = TRUE;
	}

	if (dietask == (NO_TASK+1))
		error(WARNING, "multiple active tasks have called die\n\n");

	if (CRASHDEBUG(1) && found)
		error(INFO, "panic_search: %lx (via foreach bt)\n", lasttask);

found_panic_task:
	populate_panic_threads();

	if (found) {
		if ((tc = task_to_context(lasttask)))
			return tc;

		/*
		 *  If the task list was corrupted, add this one in.
 */
		if ((tp = fill_task_struct(lasttask))) {
			if ((tc = store_context(NULL, lasttask, tp))) {
				tt->running_tasks++;
				return tc;
			}
		}
	}

	if (CRASHDEBUG(1))
		error(INFO, "panic_search: failed (via foreach bt)\n");

	return NULL;
}

/*
 *  Get the panic task from the appropriate dumpfile handler.
 *
 *  Tries each known dumpfile format in turn; falls back to scanning the
 *  active set if no format-specific handler produces a task.  Returns
 *  NO_TASK when nothing can be determined.
 */
static ulong
get_dumpfile_panic_task(void)
{
	ulong task;

	if (NETDUMP_DUMPFILE()) {
		/* remote netdump sessions cache the panic task in tt */
		task = pc->flags & REM_NETDUMP ?
			tt->panic_task : get_netdump_panic_task();
		if (task)
			return task;
	} else if (KDUMP_DUMPFILE()) {
		task = get_kdump_panic_task();
		if (task)
			return task;
	} else if (DISKDUMP_DUMPFILE()) {
		task = get_diskdump_panic_task();
		if (task)
			return task;
	} else if (KVMDUMP_DUMPFILE()) {
		task = get_kvmdump_panic_task();
		if (task)
			return task;
	} else if (XENDUMP_DUMPFILE()) {
		task = get_xendump_panic_task();
		if (task)
			return task;
	} else if (LKCD_DUMPFILE())
		return(get_lkcd_panic_task());

	if (pc->flags2 & LIVE_DUMP)
		return NO_TASK;

	if (get_active_set())
		return(get_active_set_panic_task());

	return NO_TASK;
}

/*
 *  If runqueues is defined in the kernel, get the panic threads from the
 *  active set.
 *
 *  If it's an LKCD dump, or for some other reason the active threads cannot
 *  be determined, do it the hard way.
 *
 *  NOTE: this function should be deprecated -- the work should have been
 *  done in the initial task table refresh.
 */
static void
populate_panic_threads(void)
{
	int i;
	int found;
	struct task_context *tc;

	/* Preferred source: copy the per-cpu active set verbatim. */
	if (get_active_set()) {
		for (i = 0; i < NR_CPUS; i++)
			tt->panic_threads[i] = tt->active_set[i];
		return;
	}

	found = 0;

	/* Already fully populated by an earlier pass?  Nothing to do. */
	if (!(machdep->flags & HWRESET)) {
		for (i = 0; i < kt->cpus; i++) {
			if (tt->panic_threads[i]) {
				if (++found == kt->cpus)
					return;
			}
		}
	}

	/* The hard way: scan every task for one holding a cpu. */
	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (task_has_cpu(tc->task, NULL) &&
		    (tc->processor >= 0) &&
		    (tc->processor < NR_CPUS)) {
			tt->panic_threads[tc->processor] = tc->task;
			found++;
		}
	}

	/* Last resort on UP dumpfiles: ask the dumpfile handler. */
	if (!found && !(kt->flags & SMP) &&
	    (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() ||
	     KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE() || KVMDUMP_DUMPFILE()))
		tt->panic_threads[0] = get_dumpfile_panic_task();
}

/*
 *  Separate the foreach command's output on a task-by-task basis by
 *  displaying this header string.
 */
void
print_task_header(FILE *out, struct task_context *tc, int newline)
{
	char buf[BUFSIZE];
	char buf1[BUFSIZE];

	fprintf(out, "%sPID: %-5ld TASK: %s CPU: %-2s COMMAND: \"%s\"\n",
		newline ?
"\n" : "", tc->pid, mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(tc->task)), task_cpu(tc->processor, buf, !VERBOSE), tc->comm); } /* * "help -t" output */ void dump_task_table(int verbose) { int i, j, more, nr_cpus; struct task_context *tc; struct tgid_context *tg; char buf[BUFSIZE]; int others, wrap, flen; tc = tt->current; others = 0; more = FALSE; fprintf(fp, " current: %lx [%ld]\n", (ulong)tt->current, (ulong)(tt->current - tt->context_array)); if (tt->current) { fprintf(fp, " .pid: %ld\n", tc->pid); fprintf(fp, " .comm: \"%s\"\n", tc->comm); fprintf(fp, " .task: %lx\n", tc->task); fprintf(fp, " .thread_info: %lx\n", tc->thread_info); fprintf(fp, " .processor: %d\n", tc->processor); fprintf(fp, " .ptask: %lx\n", tc->ptask); fprintf(fp, " .mm_struct: %lx\n", tc->mm_struct); fprintf(fp, " .tc_next: %lx\n", (ulong)tc->tc_next); } fprintf(fp, " context_array: %lx\n", (ulong)tt->context_array); fprintf(fp, " tgid_array: %lx\n", (ulong)tt->tgid_array); fprintf(fp, " tgid_searches: %ld\n", tt->tgid_searches); fprintf(fp, " tgid_cache_hits: %ld (%ld%%)\n", tt->tgid_cache_hits, tt->tgid_searches ? 
tt->tgid_cache_hits * 100 / tt->tgid_searches : 0); fprintf(fp, " last_tgid: %lx\n", (ulong)tt->last_tgid); fprintf(fp, "refresh_task_table: "); if (tt->refresh_task_table == refresh_fixed_task_table) fprintf(fp, "refresh_fixed_task_table()\n"); else if (tt->refresh_task_table == refresh_unlimited_task_table) fprintf(fp, "refresh_unlimited_task_table()\n"); else if (tt->refresh_task_table == refresh_pidhash_task_table) fprintf(fp, "refresh_pidhash_task_table()\n"); else if (tt->refresh_task_table == refresh_pid_hash_task_table) fprintf(fp, "refresh_pid_hash_task_table()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table) fprintf(fp, "refresh_hlist_task_table()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table_v2) fprintf(fp, "refresh_hlist_task_table_v2()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table_v3) fprintf(fp, "refresh_hlist_task_table_v3()\n"); else if (tt->refresh_task_table == refresh_active_task_table) fprintf(fp, "refresh_active_task_table()\n"); else fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table); buf[0] = NULLCHAR; fprintf(fp, " flags: %lx ", tt->flags); sprintf(buf, "("); if (tt->flags & TASK_INIT_DONE) sprintf(&buf[strlen(buf)], "%sTASK_INIT_DONE", others++ ? "|" : ""); if (tt->flags & TASK_ARRAY_EXISTS) sprintf(&buf[strlen(buf)], "%sTASK_ARRAY_EXISTS", others++ ? "|" : ""); if (tt->flags & PANIC_TASK_NOT_FOUND) sprintf(&buf[strlen(buf)], "%sPANIC_TASK_NOT_FOUND", others++ ? "|" : ""); if (tt->flags & TASK_REFRESH) sprintf(&buf[strlen(buf)], "%sTASK_REFRESH", others++ ? "|" : ""); if (tt->flags & TASK_REFRESH_OFF) sprintf(&buf[strlen(buf)], "%sTASK_REFRESH_OFF", others++ ? "|" : ""); if (tt->flags & PANIC_KSP) sprintf(&buf[strlen(buf)], "%sPANIC_KSP", others++ ? "|" : ""); if (tt->flags & POPULATE_PANIC) sprintf(&buf[strlen(buf)], "%sPOPULATE_PANIC", others++ ? "|" : ""); if (tt->flags & ACTIVE_SET) sprintf(&buf[strlen(buf)], "%sACTIVE_SET", others++ ? 
"|" : ""); if (tt->flags & PIDHASH) sprintf(&buf[strlen(buf)], "%sPIDHASH", others++ ? "|" : ""); if (tt->flags & PID_HASH) sprintf(&buf[strlen(buf)], "%sPID_HASH", others++ ? "|" : ""); if (tt->flags & THREAD_INFO) sprintf(&buf[strlen(buf)], "%sTHREAD_INFO", others++ ? "|" : ""); if (tt->flags & IRQSTACKS) sprintf(&buf[strlen(buf)], "%sIRQSTACKS", others++ ? "|" : ""); if (tt->flags & TIMESPEC) sprintf(&buf[strlen(buf)], "%sTIMESPEC", others++ ? "|" : ""); if (tt->flags & NO_TIMESPEC) sprintf(&buf[strlen(buf)], "%sNO_TIMESPEC", others++ ? "|" : ""); if (tt->flags & ACTIVE_ONLY) sprintf(&buf[strlen(buf)], "%sACTIVE_ONLY", others++ ? "|" : ""); sprintf(&buf[strlen(buf)], ")"); if (strlen(buf) > 54) fprintf(fp, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); else fprintf(fp, "%s\n", buf); fprintf(fp, " task_start: %lx\n", tt->task_start); fprintf(fp, " task_end: %lx\n", tt->task_end); fprintf(fp, " task_local: %lx\n", (ulong)tt->task_local); fprintf(fp, " max_tasks: %d\n", tt->max_tasks); fprintf(fp, " nr_threads: %d\n", tt->nr_threads); fprintf(fp, " running_tasks: %ld\n", tt->running_tasks); fprintf(fp, " retries: %ld\n", tt->retries); fprintf(fp, " panicmsg: \"%s\"\n", strip_linefeeds(get_panicmsg(buf))); fprintf(fp, " panic_processor: %d\n", tt->panic_processor); fprintf(fp, " panic_task: %lx\n", tt->panic_task); fprintf(fp, " this_task: %lx\n", tt->this_task); fprintf(fp, " pidhash_len: %d\n", tt->pidhash_len); fprintf(fp, " pidhash_addr: %lx\n", tt->pidhash_addr); fprintf(fp, " last_task_read: %lx\n", tt->last_task_read); fprintf(fp, " last_mm_read: %lx\n", tt->last_mm_read); fprintf(fp, " task_struct: %lx\n", (ulong)tt->task_struct); fprintf(fp, " mm_struct: %lx\n", (ulong)tt->mm_struct); fprintf(fp, " init_pid_ns: %lx\n", tt->init_pid_ns); fprintf(fp, " filepages: %ld\n", tt->filepages); fprintf(fp, " anonpages: %ld\n", tt->anonpages); wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS; fprintf(fp, " idle_threads:"); for (i = 0; i < nr_cpus; i++) { if (!tt->idle_threads) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->idle_threads[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " active_set:"); for (i = 0; i < nr_cpus; i++) { if (!tt->active_set) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->active_set[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->active_set[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " panic_threads:"); for (i = 0; i < nr_cpus; i++) { if (!tt->panic_threads) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->panic_threads[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " panic_ksp:"); for (i = 0; i < nr_cpus; i++) { if (!tt->panic_ksp) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->panic_ksp[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " hardirq_ctx:"); for (i = 0; i < nr_cpus; i++) { if (!tt->hardirq_ctx) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->hardirq_ctx[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " hardirq_tasks:"); for (i = 0; i < nr_cpus; 
i++) { if (!tt->hardirq_tasks) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->hardirq_tasks[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " softirq_ctx:"); for (i = 0; i < nr_cpus; i++) { if (!tt->softirq_ctx) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->softirq_ctx[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " softirq_tasks:"); for (i = 0; i < nr_cpus; i++) { if (!tt->softirq_tasks) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->softirq_tasks[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); dump_task_states(); if (!verbose) return; if (tt->flags & THREAD_INFO) fprintf(fp, "\nINDEX TASK/THREAD_INFO PID CPU PTASK MM_STRUCT COMM\n"); else fprintf(fp, "\nINDEX TASK PID CPU PTASK MM_STRUCT COMM\n"); tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tt->flags & THREAD_INFO) fprintf(fp, "[%3d] %08lx/%08lx %5ld %d %08lx %08lx %s\n", i, tc->task, tc->thread_info, tc->pid, tc->processor, tc->ptask, (ulong)tc->mm_struct, tc->comm); else fprintf(fp, "[%3d] %08lx %5ld %d %08lx %08lx %s\n", i, tc->task, tc->pid, tc->processor, tc->ptask, (ulong)tc->mm_struct, tc->comm); } fprintf(fp, "\nINDEX TASK TGID (COMM)\n"); for (i = 0; i < RUNNING_TASKS(); i++) { tg = &tt->tgid_array[i]; tc = task_to_context(tg->task); fprintf(fp, "[%3d] %lx %ld (%s)\n", i, tg->task, tg->tgid, tc->comm); } } /* * Determine whether a task is a kernel thread. 
This would seem easier than
 *  it looks, but on live systems it's easy to get faked out.
 */
int
is_kernel_thread(ulong task)
{
	struct task_context *tc;
	ulong mm;

	tc = task_to_context(task);

	/* pid 0 is a kernel (idle/swapper) thread -- unless it is crash
	 * itself showing up with pid 0 in an inconsistent live snapshot. */
	if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name))
		return TRUE;

	if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED)
		initialize_task_state();

	if (IS_ZOMBIE(task) || IS_EXITING(task))
		return FALSE;

	/*
	 *  Check for shifting sands on a live system.
	 */
	mm = task_mm(task, TRUE);
	if (ACTIVE() && (mm != tc->mm_struct))
		return FALSE;

	/*
	 *  Later version Linux kernel threads have no mm_struct at all.
	 *  Earlier version kernel threads point to common init_mm.
	 */
	if (!tc->mm_struct) {
		if (IS_EXITING(task))
			return FALSE;
		/* state==0 && flags==0 looks like a stale entry, not a
		 * genuine kernel thread */
		if (!task_state(task) && !task_flags(task))
			return FALSE;
		return TRUE;
	} else if (tc->mm_struct == symbol_value("init_mm"))
		return TRUE;

	return FALSE;
}

/*
 *  Gather an array of pointers to the per-cpu idle tasks.  The tasklist
 *  argument must be at least the size of ulong[NR_CPUS].  There may be
 *  junk in everything after the first entry on a single CPU box, so the
 *  data gathered may be throttled by kt->cpus.
 */
void
get_idle_threads(ulong *tasklist, int nr_cpus)
{
	int i, cnt;
	ulong runq, runqaddr;
	char *runqbuf;
	struct syment *rq_sp;

	BZERO(tasklist, sizeof(ulong) * NR_CPUS);
	runqbuf = NULL;
	cnt = 0;

	/* Newest layout: per-cpu runqueues with an "idle" member. */
	if ((rq_sp = per_cpu_symbol_search("per_cpu__runqueues")) &&
	    VALID_MEMBER(runqueue_idle)) {
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value + kt->__per_cpu_offset[i];
			else
				runq = rq_sp->value;
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
				"runqueues entry (per_cpu)", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf + OFFSET(runqueue_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	/* Older layout: static runqueues[] array with "idle" member. */
	} else if (symbol_exists("runqueues") && VALID_MEMBER(runqueue_idle)) {
		runq = symbol_value("runqueues");
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++, runq += SIZE(runqueue)) {
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
				"runqueues entry (old)", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf + OFFSET(runqueue_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	/* O(1)-scheduler layout: runqueues[] with per-cpu "cpu" array. */
	} else if (symbol_exists("runqueues") && VALID_MEMBER(runqueue_cpu)) {
		runq = symbol_value("runqueues");
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++) {
			runqaddr = runq + (SIZE(runqueue) * rq_idx(i));
			readmem(runqaddr, KVADDR, runqbuf, SIZE(runqueue),
				"runqueues entry", FAULT_ON_ERROR);
			if ((tasklist[i] = get_idle_task(i, runqbuf)))
				cnt++;
		}
	/* Pre-runqueue kernels: init_tasks[] array of idle tasks. */
	} else if (symbol_exists("init_tasks")) {
		readmem(symbol_value("init_tasks"), KVADDR, tasklist,
			sizeof(void *) * nr_cpus, "init_tasks array",
			FAULT_ON_ERROR);
		if (IS_KVADDR(tasklist[0]))
			cnt++;
		else
			BZERO(tasklist, sizeof(ulong) * NR_CPUS);
	/* OpenVZ kernels keep the idle task in pcpu_info. */
	} else if (OPENVZ()) {
		runq = symbol_value("pcpu_info");
		runqbuf = GETBUF(SIZE(pcpu_info));
		for (i = 0; i < nr_cpus; i++, runq += SIZE(pcpu_info)) {
			readmem(runq, KVADDR, runqbuf, SIZE(pcpu_info),
				"pcpu info", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf + OFFSET(pcpu_info_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	}

	if (runqbuf)
		FREEBUF(runqbuf);

	if
(!cnt) {
		/* Nothing usable found: fall back to the UP init task. */
		error(INFO,
	"cannot determine idle task addresses from init_tasks[] or runqueues[]\n");

		tasklist[0] = symbol_value("init_task_union");
	}
}

/*
 *  Emulate the kernel rq_idx() macro: map a cpu number to its runqueue
 *  index, accounting for hyperthreaded runqueue siblings.
 */
static long
rq_idx(int cpu)
{
	if (kt->runq_siblings == 1)
		return cpu;
	else if (!(kt->__rq_idx))
		return 0;
	else
		return kt->__rq_idx[cpu];
}

/*
 *  Emulate the kernel cpu_idx() macro: map a cpu number to its index
 *  within a shared runqueue's cpu[] array.
 */
static long
cpu_idx(int cpu)
{
	if (kt->runq_siblings == 1)
		return 0;
	else if (!(kt->__cpu_idx))
		return 0;
	else
		return kt->__cpu_idx[cpu];
}

/*
 *  Dig out the idle task data from a runqueue structure.
 *  Returns NO_TASK (with a diagnostic for in-range cpus) when the stored
 *  pointer is not a valid kernel virtual address.
 */
static ulong
get_idle_task(int cpu, char *runqbuf)
{
	ulong idle_task;

	idle_task = ULONG(runqbuf + OFFSET(runqueue_cpu) +
		(SIZE(cpu_s) * cpu_idx(cpu)) + OFFSET(cpu_s_idle));

	if (IS_KVADDR(idle_task))
		return idle_task;
	else {
		if (cpu < kt->cpus)
			error(INFO,
			    "cannot determine idle task for cpu %d\n", cpu);
		return NO_TASK;
	}
}

/*
 *  Dig out the current task data from a runqueue structure.
 *  Returns NO_TASK when the stored pointer is not a valid kernel address.
 */
static ulong
get_curr_task(int cpu, char *runqbuf)
{
	ulong curr_task;

	curr_task = ULONG(runqbuf + OFFSET(runqueue_cpu) +
		(SIZE(cpu_s) * cpu_idx(cpu)) + OFFSET(cpu_s_curr));

	if (IS_KVADDR(curr_task))
		return curr_task;
	else
		return NO_TASK;
}

/*
 *  On kernels with runqueue[] array, store the active set of tasks.
 */
int
get_active_set(void)
{
	int i, cnt;
	ulong runq, runqaddr;
	char *runqbuf;
	struct syment *rq_sp;

	/* Cached from a previous call? */
	if (tt->flags & ACTIVE_SET)
		return TRUE;

	runq = 0;

	/* Locate the runqueue data: per-cpu symbol, static array, or
	 * OpenVZ pcpu_info. */
	rq_sp = per_cpu_symbol_search("per_cpu__runqueues");
	if (!rq_sp) {
		if (symbol_exists("runqueues"))
			runq = symbol_value("runqueues");
		else if (OPENVZ())
			runq = symbol_value("pcpu_info");
		else
			return FALSE;
	} else
		runq = rq_sp->value;

	if (!tt->active_set &&
	    !(tt->active_set = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc active_set array");

	runqbuf = GETBUF(SIZE(runqueue));
	cnt = 0;

	if (OPENVZ()) {
		ulong vcpu_struct;
		char *pcpu_info_buf, *vcpu_struct_buf;

		pcpu_info_buf = GETBUF(SIZE(pcpu_info));
		vcpu_struct_buf = GETBUF(SIZE(vcpu_struct));

		/* pcpu_info -> vcpu_struct -> embedded rq -> curr task */
		for (i = 0; i < kt->cpus; i++, runq += SIZE(pcpu_info)) {
			readmem(runq, KVADDR, pcpu_info_buf,
				SIZE(pcpu_info), "pcpu_info", FAULT_ON_ERROR);
			vcpu_struct = ULONG(pcpu_info_buf +
				OFFSET(pcpu_info_vcpu));
			readmem(vcpu_struct, KVADDR, vcpu_struct_buf,
				SIZE(vcpu_struct), "pcpu_info->vcpu",
				FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(vcpu_struct_buf +
				OFFSET(vcpu_struct_rq) +
				OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
		FREEBUF(pcpu_info_buf);
		FREEBUF(vcpu_struct_buf);
	} else if (VALID_MEMBER(runqueue_curr) && rq_sp) {
		/* Per-cpu runqueues: read each cpu's rq->curr. */
		for (i = 0; i < kt->cpus; i++) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value + kt->__per_cpu_offset[i];
			else
				runq = rq_sp->value;
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
				"active runqueues entry (per_cpu)",
				FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(runqbuf +
				OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
	} else if (VALID_MEMBER(runqueue_curr)) {
		/* Static runqueues[] array. */
		for (i = 0; i < MAX(kt->cpus, kt->kernel_NR_CPUS);
		     i++, runq += SIZE(runqueue)) {
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
				"(old) runqueues curr", FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(runqbuf +
				OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
	} else if (VALID_MEMBER(runqueue_cpu)) {
		/* O(1)-scheduler shared runqueues. */
		for (i = 0; i <
kt->cpus; i++) { runqaddr = runq + (SIZE(runqueue) * rq_idx(i)); readmem(runqaddr, KVADDR, runqbuf, SIZE(runqueue), "runqueues curr", FAULT_ON_ERROR); if ((tt->active_set[i] = get_curr_task(i, runqbuf))) cnt++; } } if (cnt) { tt->flags |= ACTIVE_SET; return TRUE; } else { error(INFO, "get_active_set: no tasks found?\n"); return FALSE; } } /* * Clear the ACTIVE_SET flag on a live system, forcing a re-read of the * runqueues[] array the next time get_active_set() is called above. */ void clear_active_set(void) { if (ACTIVE() && (tt->flags & TASK_REFRESH)) tt->flags &= ~ACTIVE_SET; } #define RESOLVE_PANIC_AND_DIE_CALLERS() \ if (xen_panic_task) { \ if (CRASHDEBUG(1)) \ error(INFO, \ "get_active_set_panic_task: %lx (xen_panic_event)\n", \ xen_panic_task); \ return xen_panic_task; \ } \ if (crash_kexec_task) { \ if (CRASHDEBUG(1)) \ error(INFO, \ "get_active_set_panic_task: %lx (crash_kexec)\n", \ crash_kexec_task); \ return crash_kexec_task; \ } \ if (crash_fadump_task) { \ if (CRASHDEBUG(1)) \ error(INFO, \ "get_active_set_panic_task: %lx (crash_fadump)\n", \ crash_fadump_task); \ return crash_fadump_task; \ } \ if ((panic_task > (NO_TASK+1)) && !die_task) { \ if (CRASHDEBUG(1)) \ fprintf(fp, \ "get_active_set_panic_task: %lx (panic)\n", \ panic_task); \ return panic_task; \ } \ \ if (panic_task && die_task) { \ if ((panic_task > (NO_TASK+1)) && \ (panic_task == die_task)) { \ if (CRASHDEBUG(1)) \ fprintf(fp, \ "get_active_set_panic_task: %lx (panic)\n", \ panic_task); \ return panic_task; \ } \ error(WARNING, \ "multiple active tasks have called die and/or panic\n\n"); \ goto no_panic_task_found; \ } \ \ if (die_task > (NO_TASK+1)) { \ if (CRASHDEBUG(1)) \ fprintf(fp, \ "get_active_set_panic_task: %lx (die)\n", \ die_task); \ return die_task; \ } \ else if (die_task == (NO_TASK+1)) \ error(WARNING, \ "multiple active tasks have called die\n\n"); #define SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS() \ while (fgets(buf, BUFSIZE, pc->tmpfile)) { \ if (strstr(buf, " 
die+")) { \ switch (die_task) \ { \ case NO_TASK: \ die_task = task; \ break; \ default: \ if (die_task != task) \ die_task = NO_TASK+1; \ break; \ } \ } \ if (strstr(buf, " panic+")) { \ switch (panic_task) \ { \ case NO_TASK: \ panic_task = task; \ if (XENDUMP_DUMPFILE()) \ xendump_panic_hook(buf); \ break; \ default: \ if (panic_task != task) \ panic_task = NO_TASK+1; \ break; \ } \ } \ if (strstr(buf, " crash_kexec+") || \ strstr(buf, " .crash_kexec+")) { \ crash_kexec_task = task; \ } \ if (strstr(buf, " .crash_fadump+")) \ crash_fadump_task = task; \ if (strstr(buf, " machine_kexec+") || \ strstr(buf, " .machine_kexec+")) { \ crash_kexec_task = task; \ } \ if (strstr(buf, " xen_panic_event+") || \ strstr(buf, " .xen_panic_event+")){ \ xen_panic_task = task; \ xendump_panic_hook(buf); \ } \ if (machine_type("IA64") && XENDUMP_DUMPFILE() && !xen_panic_task && \ strstr(buf, " sysrq_handle_crashdump+")) \ xen_sysrq_task = task; \ } /* * Search the active set tasks for instances of die or panic calls. 
*/ static ulong get_active_set_panic_task() { int i, j, found; ulong task; char buf[BUFSIZE]; ulong panic_task, die_task, crash_kexec_task, crash_fadump_task; ulong xen_panic_task; ulong xen_sysrq_task; panic_task = die_task = crash_kexec_task = xen_panic_task = NO_TASK; xen_sysrq_task = NO_TASK; crash_fadump_task = NO_TASK; for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->active_set[i]) || !task_exists(task)) continue; open_tmpfile(); raw_stack_dump(GET_STACKBASE(task), STACKSIZE()); rewind(pc->tmpfile); SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); close_tmpfile(); } RESOLVE_PANIC_AND_DIE_CALLERS(); if (tt->flags & IRQSTACKS) { panic_task = die_task = NO_TASK; for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->hardirq_tasks[i])) continue; for (j = found = 0; j < NR_CPUS; j++) { if (task == tt->active_set[j]) { found++; break; } } if (!found) continue; open_tmpfile(); raw_stack_dump(tt->hardirq_ctx[i], SIZE(thread_union)); rewind(pc->tmpfile); SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); close_tmpfile(); } RESOLVE_PANIC_AND_DIE_CALLERS(); panic_task = die_task = NO_TASK; for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->softirq_tasks[i])) continue; for (j = found = 0; j < NR_CPUS; j++) { if (task == tt->active_set[j]) { found++; break; } } if (!found) continue; open_tmpfile(); raw_stack_dump(tt->softirq_ctx[i], SIZE(thread_union)); rewind(pc->tmpfile); SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS(); close_tmpfile(); } RESOLVE_PANIC_AND_DIE_CALLERS(); } if (crash_kexec_task) { if (CRASHDEBUG(1)) error(INFO, "get_active_set_panic_task: %lx (crash_kexec)\n", crash_kexec_task); return crash_kexec_task; } if (crash_fadump_task) { if (CRASHDEBUG(1)) error(INFO, "get_active_set_panic_task: %lx (crash_fadump)\n", crash_fadump_task); return crash_fadump_task; } if (xen_sysrq_task) { if (CRASHDEBUG(1)) error(INFO, "get_active_set_panic_task: %lx (sysrq_handle_crashdump)\n", xen_sysrq_task); return xen_sysrq_task; } no_panic_task_found: if (CRASHDEBUG(1)) error(INFO, 
"get_active_set_panic_task: failed\n"); return NO_TASK; } /* * Determine whether a task is one of the idle threads. */ int is_idle_thread(ulong task) { int i; for (i = 0; i < NR_CPUS; i++) if (task == tt->idle_threads[i]) return TRUE; return FALSE; } /* * Dump the current run queue task list. This command should be expanded * to deal with timer queues, bottom halves, etc... */ void cmd_runq(void) { int c; char arg_buf[BUFSIZE]; ulong *cpus = NULL; int sched_debug = 0; int dump_timestamp_flag = 0; int dump_task_group_flag = 0; int dump_milliseconds_flag = 0; while ((c = getopt(argcnt, args, "dtgmc:")) != EOF) { switch(c) { case 'd': sched_debug = 1; break; case 't': dump_timestamp_flag = 1; break; case 'm': dump_milliseconds_flag = 1; break; case 'g': if ((INVALID_MEMBER(task_group_cfs_rq) && INVALID_MEMBER(task_group_rt_rq)) || INVALID_MEMBER(task_group_parent)) option_not_supported(c); dump_task_group_flag = 1; break; case 'c': if (pc->curcmd_flags & CPUMASK) { error(INFO, "only one -c option allowed\n"); argerrs++; } else { pc->curcmd_flags |= CPUMASK; BZERO(arg_buf, BUFSIZE); strncpy(arg_buf, optarg, strlen(optarg)); cpus = get_cpumask_buf(); make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL); pc->curcmd_private = (ulong)cpus; } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (dump_timestamp_flag) dump_on_rq_timestamp(); else if (dump_milliseconds_flag) dump_on_rq_milliseconds(); else if (sched_debug) dump_on_rq_tasks(); else if (dump_task_group_flag) dump_tasks_by_task_group(); else dump_runq(); if (cpus) FREEBUF(cpus); } /* * Displays the runqueue and active task timestamps of each cpu. */ static void dump_on_rq_timestamp(void) { ulong runq; char buf[BUFSIZE]; char format[15]; struct syment *rq_sp; struct task_context *tc; int cpu, len, indent; ulonglong timestamp; ulong *cpus; indent = runq = 0; cpus = pc->curcmd_flags & CPUMASK ? 
(ulong *)(ulong)pc->curcmd_private : NULL; if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues"))) error(FATAL, "per-cpu runqueues do not exist\n"); if (INVALID_MEMBER(rq_timestamp)) option_not_supported('t'); for (cpu = 0; cpu < kt->cpus; cpu++) { if (cpus && !NUM_IN_BITMAP(cpus, cpu)) continue; if ((kt->flags & SMP) && (kt->flags &PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[cpu]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, ×tamp, sizeof(ulonglong), "per-cpu rq timestamp", FAULT_ON_ERROR); sprintf(buf, pc->output_radix == 10 ? "%llu" : "%llx", timestamp); fprintf(fp, "%sCPU %d: ", cpu < 10 ? " " : "", cpu); if (hide_offline_cpu(cpu)) { fprintf(fp, "[OFFLINE]\n"); continue; } else fprintf(fp, "%s\n", buf); len = strlen(buf); if ((tc = task_to_context(tt->active_set[cpu]))){ if (cpu < 10) indent = 7; else if (cpu < 100) indent = 8; else if (cpu < 1000) indent = 9; if (cpu < 10) indent++; timestamp = task_last_run(tc->task); sprintf(format, "%c0%dll%c", '%', len, pc->output_radix == 10 ? 'u' : 'x'); sprintf(buf, format, timestamp); fprintf(fp, "%s%s PID: %-5ld TASK: %lx COMMAND: \"%s\"\n", space(indent), buf, tc->pid, tc->task, tc->comm); } else fprintf(fp, "\n"); } } /* * Displays the runqueue and active task timestamps of each cpu. */ static void dump_on_rq_milliseconds(void) { ulong runq; char buf[BUFSIZE]; struct syment *rq_sp; struct task_context *tc; int cpu, max_indent, indent, max_days, days; long long delta; ulonglong task_timestamp, rq_timestamp; ulong *cpus; if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues"))) error(FATAL, "per-cpu runqueues do not exist\n"); if (INVALID_MEMBER(rq_timestamp)) option_not_supported('m'); if (kt->cpus < 10) max_indent = 1; else if (kt->cpus < 100) max_indent = 2; else if (kt->cpus < 1000) max_indent = 3; else max_indent = 4; max_days = days = 0; cpus = pc->curcmd_flags & CPUMASK ? 
(ulong *)(ulong)pc->curcmd_private : NULL; for (cpu = 0; cpu < kt->cpus; cpu++) { if (cpus && !NUM_IN_BITMAP(cpus, cpu)) continue; if ((kt->flags & SMP) && (kt->flags &PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[cpu]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_timestamp, sizeof(ulonglong), "per-cpu rq timestamp", FAULT_ON_ERROR); if (!max_days) { translate_nanoseconds(rq_timestamp, buf); max_days = first_space(buf) - buf; } if (cpu < 10) indent = max_indent; else if (cpu < 100) indent = max_indent - 1; else if (cpu < 1000) indent = max_indent - 2; else indent = max_indent - 4; if (hide_offline_cpu(cpu)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", space(indent), cpu); continue; } if ((tc = task_to_context(tt->active_set[cpu]))) task_timestamp = task_last_run(tc->task); else { fprintf(fp, "%sCPU %d: [unknown]\n", space(indent), cpu); continue; } delta = rq_timestamp - task_timestamp; if (delta < 0) delta = 0; translate_nanoseconds(delta, buf); days = first_space(buf) - buf; fprintf(fp, "%sCPU %d: [%s%s] PID: %-5ld TASK: %lx COMMAND: \"%s\"\n", space(indent), cpu, space(max_days - days), buf, tc->pid, tc->task, tc->comm); } } /* * Dump the task run queue on behalf cmd_runq(). 
 */
static void
dump_runq(void)
{
    int i;
    ulong next, runqueue_head;
    long offs;
    int qlen, cnt;
    ulong *tlist;
    struct task_context *tc;

    /* CFS-era kernels: hand off to the CFS dumper. */
    if (VALID_MEMBER(rq_cfs)) {
        dump_CFS_runqueues();
        return;
    }

    /* 2.6 O(1) scheduler: per-cpu prio-array runqueues. */
    if (VALID_MEMBER(runqueue_arrays)) {
        dump_runqueues();
        return;
    }

    /* 2.4-style single linked-list run queue. */
    offs = runqueue_head = 0;
    qlen = 1000;

start_again:
    tlist = (ulong *)GETBUF(qlen * sizeof(void *));

    if (symbol_exists("runqueue_head")) {
        next = runqueue_head = symbol_value("runqueue_head");
        offs = 0;
    } else if (VALID_MEMBER(task_struct_next_run)) {
        offs = OFFSET(task_struct_next_run);
        next = runqueue_head = symbol_value("init_task_union");
    } else
        error(FATAL, "cannot determine run queue structures\n");

    /* Collect the list entries; grow the buffer and restart if full. */
    cnt = 0;
    do {
        if (cnt == qlen) {
            FREEBUF(tlist);
            qlen += 1000;
            goto start_again;
        }
        tlist[cnt++] = next;
        readmem(next+offs, KVADDR, &next, sizeof(void *),
            "run queue entry", FAULT_ON_ERROR);
        if (next == runqueue_head)
            break;
    } while (next);

    for (i = 0; i < cnt; i++) {
        if (tlist[i] == runqueue_head)
            continue;

        /* List entries point into task pages; round down to the base. */
        if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) {
            fprintf(fp, "PID: ? TASK: %lx CPU: ? COMMAND: ?\n",
                tlist[i]);
            continue;
        }

        if (!is_idle_thread(tc->task))
            print_task_header(fp, tc, 0);
    }
}

#define RUNQ_ACTIVE (1)
#define RUNQ_EXPIRED (2)

/*
 * Dump the O(1) scheduler's per-cpu active and expired prio arrays.
 */
static void
dump_runqueues(void)
{
    int cpu, displayed;
    ulong runq, offset;
    char *runqbuf;
    ulong active, expired, arrays;
    struct task_context *tc;
    struct syment *rq_sp;
    ulong *cpus;

    runq = 0;
    rq_sp = per_cpu_symbol_search("per_cpu__runqueues");
    if (!rq_sp) {
        /* Older kernels: a static runqueues[] array, walked below
           via the "runq += SIZE(runqueue)" loop increment. */
        if (symbol_exists("runqueues"))
            runq = symbol_value("runqueues");
        else
            error(FATAL, "cannot determine run queue structures\n");
    }
    get_active_set();
    runqbuf = GETBUF(SIZE(runqueue));
    cpus = pc->curcmd_flags & CPUMASK ?
        (ulong *)(ulong)pc->curcmd_private : NULL;

    for (cpu = displayed = 0; cpu < kt->cpus; cpu++,
         runq += SIZE(runqueue)) {
        if (cpus && !NUM_IN_BITMAP(cpus, cpu))
            continue;

        /* Per-cpu symbol overrides the array-walk increment above. */
        if (rq_sp) {
            if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
                runq = rq_sp->value + kt->__per_cpu_offset[cpu];
            else
                runq = rq_sp->value;
        }

        fprintf(fp, "%sCPU %d ", displayed++ ? "\n" : "", cpu);
        if (hide_offline_cpu(cpu)) {
            fprintf(fp, "[OFFLINE]\n");
            continue;
        } else
            fprintf(fp, "RUNQUEUE: %lx\n", runq);

        fprintf(fp, " CURRENT: ");
        if ((tc = task_to_context(tt->active_set[cpu])))
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
        else
            fprintf(fp, "%lx\n", tt->active_set[cpu]);

        readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
            "runqueues array entry", FAULT_ON_ERROR);
        active = ULONG(runqbuf + OFFSET(runqueue_active));
        expired = ULONG(runqbuf + OFFSET(runqueue_expired));
        arrays = runq + OFFSET(runqueue_arrays);

        console("active: %lx\n", active);
        console("expired: %lx\n", expired);
        console("arrays: %lx\n", arrays);

        /* NOTE(review): the ternary result is immediately overwritten
           by "active - runq" below — the ternary is dead code kept
           from an older computation of the same offset. */
        offset = active == arrays ? OFFSET(runqueue_arrays) :
            OFFSET(runqueue_arrays) + SIZE(prio_array);
        offset = active - runq;
        dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]);

        offset = expired == arrays ? OFFSET(runqueue_arrays) :
            OFFSET(runqueue_arrays) + SIZE(prio_array);
        offset = expired - runq;
        dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]);
    }
}

/*
 * Dump one prio array (active or expired) of an O(1) runqueue.
 * k_prio_array is the kernel address; u_prio_array points at the
 * already-read local copy of the same data.
 */
static void
dump_prio_array(int which, ulong k_prio_array, char *u_prio_array)
{
    int i, c, cnt, tot, nr_active;
    int qheads ATTRIBUTE_UNUSED;
    ulong offset, kvaddr, uvaddr;
    ulong list_head[2];
    struct list_data list_data, *ld;
    struct task_context *tc;
    ulong *tlist;

    qheads = (i = ARRAY_LENGTH(prio_array_queue)) ?
        i : get_array_length("prio_array.queue", NULL, SIZE(list_head));

    console("dump_prio_array[%d]: %lx %lx\n",
        which, k_prio_array, (ulong)u_prio_array);

    nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active));
    console("nr_active: %d\n", nr_active);

    fprintf(fp, " %s PRIO_ARRAY: %lx\n",
        which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array);

    if (CRASHDEBUG(1))
        fprintf(fp, "nr_active: %d\n", nr_active);

    ld = &list_data;

    /* Walk all 140 priority queues of the prio array. */
    for (i = tot = 0; i < 140; i++) {
        offset = OFFSET(prio_array_queue) + (i * SIZE(list_head));
        kvaddr = k_prio_array + offset;
        uvaddr = (ulong)u_prio_array + offset;
        /* Pull next/prev from the local copy, not kernel memory. */
        BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

        if (CRASHDEBUG(1))
            fprintf(fp, "prio_array[%d] @ %lx => %lx/%lx %s\n",
                i, kvaddr, list_head[0], list_head[1],
                (list_head[0] == list_head[1]) &&
                (list_head[0] == kvaddr) ? "(empty)" : "");

        /* Self-pointing list head: queue is empty. */
        if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
            continue;

        console("[%d] %lx => %lx-%lx ",
            i, kvaddr, list_head[0], list_head[1]);

        fprintf(fp, " [%3d] ", i);

        BZERO(ld, sizeof(struct list_data));
        ld->start = list_head[0];
        ld->list_head_offset = OFFSET(task_struct_run_list);
        ld->end = kvaddr;

        hq_open();
        cnt = do_list(ld);
        hq_close();
        console("%d entries\n", cnt);

        tlist = (ulong *)GETBUF((cnt) * sizeof(ulong));
        cnt = retrieve_list(tlist, cnt);
        for (c = 0; c < cnt; c++) {
            if (!(tc = task_to_context(tlist[c])))
                continue;
            if (c)
                INDENT(11);
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
        }
        tot += cnt;
        FREEBUF(tlist);
    }

    if (!tot) {
        INDENT(5);
        fprintf(fp, "[no tasks queued]\n");
    }
}

#define MAX_GROUP_NUM 200

/*
 * Bookkeeping for one task_group discovered while walking the
 * task-group hierarchy ("runq -g").
 */
struct task_group_info {
    int use;                        /* still to be displayed for this cpu */
    int depth;                      /* nesting depth in the hierarchy */
    char *name;                     /* cgroup name, or NULL */
    ulong task_group;               /* kernel task_group address */
    struct task_group_info *parent; /* parent group entry, or NULL */
};

static struct task_group_info **tgi_array;
static int tgi_p = 0;       /* entries in use */
static int tgi_p_max = 0;   /* allocated capacity */

/*
 * Bubble-sort tgi_array[] by ascending depth.
 */
static void
sort_task_group_info_array(void)
{
    int i, j;
    struct task_group_info *tmp;

    for (i = 0; i < tgi_p - 1; i++) {
        for (j = 0; j < tgi_p - i - 1; j++) {
            if (tgi_array[j]->depth > tgi_array[j+1]->depth) {
                tmp = tgi_array[j+1];
                tgi_array[j+1] = tgi_array[j];
                tgi_array[j] = tmp;
            }
        }
    }
}

/*
 * Debug dump of tgi_array[] (CRASHDEBUG only).
 */
static void
print_task_group_info_array(void)
{
    int i;

    for (i = 0; i < tgi_p; i++) {
        fprintf(fp, "%d : use=%d, depth=%d, group=%lx, ", i,
            tgi_array[i]->use, tgi_array[i]->depth,
            tgi_array[i]->task_group);
        fprintf(fp, "name=%s, ",
            tgi_array[i]->name ? tgi_array[i]->name : "NULL");
        if (tgi_array[i]->parent)
            fprintf(fp, "parent=%lx",
                tgi_array[i]->parent->task_group);
        fprintf(fp, "\n");
    }
}

/*
 * Release all tgi_array[] entries and the array itself.
 */
static void
free_task_group_info_array(void)
{
    int i;

    for (i = 0; i < tgi_p; i++) {
        if (tgi_array[i]->name)
            FREEBUF(tgi_array[i]->name);
        FREEBUF(tgi_array[i]);
    }
    tgi_p = 0;
    FREEBUF(tgi_array);
}

/*
 * Re-arm the "use" flags so the array can drive another cpu's dump;
 * the root (depth 0) group is always printed separately.
 */
static void
reuse_task_group_info_array(void)
{
    int i;

    for (i = 0; i < tgi_p; i++) {
        if (tgi_array[i]->depth == 0)
            tgi_array[i]->use = 0;
        else
            tgi_array[i]->use = 1;
    }
}

/*
 * Print one queued task line, prefixed by its priority.
 */
static void
dump_task_runq_entry(struct task_context *tc, int current)
{
    int prio;

    readmem(tc->task + OFFSET(task_struct_prio), KVADDR,
        &prio, sizeof(int), "task prio", FAULT_ON_ERROR);
    fprintf(fp, "[%3d] ", prio);
    fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"",
        tc->pid, tc->task, tc->comm);
    if (current)
        fprintf(fp, " [CURRENT]\n");
    else
        fprintf(fp, "\n");
}

/*
 * Print the header line for one task_group's CFS runqueue.
 */
static void
print_group_header_fair(int depth, ulong cfs_rq, void *t)
{
    int throttled;
    struct task_group_info *tgi = (struct task_group_info *)t;

    INDENT(2 + 3 * depth);
    fprintf(fp, "TASK_GROUP: %lx CFS_RQ: %lx ",
        tgi->task_group, cfs_rq);
    if (tgi->name)
        fprintf(fp, " <%s>", tgi->name);

    if (VALID_MEMBER(cfs_rq_throttled)) {
        readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR,
            &throttled, sizeof(int), "cfs_rq throttled",
            FAULT_ON_ERROR);
        if (throttled)
            fprintf(fp, " (THROTTLED)");
    }
    fprintf(fp, "\n");
}

/*
 * Recursively print the not-yet-shown ancestors of a task_group,
 * outermost first, marking each as shown (use = 0).
 */
static void
print_parent_task_group_fair(void *t, int cpu)
{
    struct task_group_info *tgi;
    ulong cfs_rq_c, cfs_rq_p;

    tgi = ((struct task_group_info *)t)->parent;
    if (tgi && tgi->use)
        print_parent_task_group_fair(tgi, cpu);
    else
        return;

    readmem(tgi->task_group +
        OFFSET(task_group_cfs_rq), KVADDR, &cfs_rq_c,
        sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
    /* task_group->cfs_rq is a per-cpu pointer array; index by cpu. */
    readmem(cfs_rq_c + cpu * sizeof(ulong), KVADDR, &cfs_rq_p,
        sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);

    print_group_header_fair(tgi->depth, cfs_rq_p, tgi);
    tgi->use = 0;
}

/*
 * Recursively display child cfs_rq's that were dequeued from this
 * cfs_rq (not reachable via its rb tree) but still have queued tasks.
 */
static int
dump_tasks_in_lower_dequeued_cfs_rq(int depth, ulong cfs_rq, int cpu,
    struct task_context *ctc)
{
    int i, total, nr_running;
    ulong group, cfs_rq_c, cfs_rq_p;

    total = 0;
    for (i = 0; i < tgi_p; i++) {
        /* Only consider un-shown groups exactly one level below. */
        if (tgi_array[i]->use == 0 ||
            tgi_array[i]->depth - depth != 1)
            continue;

        readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR, &group,
            sizeof(ulong), "cfs_rq tg", FAULT_ON_ERROR);
        if (group != tgi_array[i]->parent->task_group)
            continue;

        readmem(tgi_array[i]->task_group +
            OFFSET(task_group_cfs_rq), KVADDR, &cfs_rq_c,
            sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
        readmem(cfs_rq_c + cpu * sizeof(ulong), KVADDR, &cfs_rq_p,
            sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
        if (cfs_rq == cfs_rq_p)
            continue;

        readmem(cfs_rq_p + OFFSET(cfs_rq_nr_running), KVADDR,
            &nr_running, sizeof(int), "cfs_rq nr_running",
            FAULT_ON_ERROR);
        if (nr_running == 0) {
            /* Empty here, but descendants may hold tasks. */
            total += dump_tasks_in_lower_dequeued_cfs_rq(depth + 1,
                cfs_rq_p, cpu, ctc);
            continue;
        }

        print_parent_task_group_fair(tgi_array[i], cpu);
        total++;
        total += dump_tasks_in_task_group_cfs_rq(depth + 1, cfs_rq_p,
            cpu, ctc);
    }
    return total;
}

/*
 * Dump all tasks queued on a cfs_rq by walking its rb tree,
 * descending into group scheduling entities (se->my_q).
 */
static int
dump_tasks_in_cfs_rq(ulong cfs_rq)
{
    struct task_context *tc;
    struct rb_root *root;
    struct rb_node *node;
    ulong my_q, leftmost, curr, curr_my_q;
    int total;

    total = 0;

    /* The current entity is dequeued; if it is a group, recurse. */
    if (VALID_MEMBER(sched_entity_my_q)) {
        readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr,
            sizeof(ulong), "curr", FAULT_ON_ERROR);
        if (curr) {
            readmem(curr + OFFSET(sched_entity_my_q), KVADDR,
                &curr_my_q, sizeof(ulong), "curr->my_q",
                FAULT_ON_ERROR);
            if (curr_my_q)
                total += dump_tasks_in_cfs_rq(curr_my_q);
        }
    }

    readmem(cfs_rq + OFFSET(cfs_rq_rb_leftmost), KVADDR, &leftmost,
        sizeof(ulong), "rb_leftmost", FAULT_ON_ERROR);
    root = (struct rb_root *)(cfs_rq + OFFSET(cfs_rq_tasks_timeline));

    /* leftmost == 0 means the tree is empty; skip the walk. */
    for (node = rb_first(root); leftmost && node; node = rb_next(node)) {
        if (VALID_MEMBER(sched_entity_my_q)) {
            readmem((ulong)node - OFFSET(sched_entity_run_node) +
                OFFSET(sched_entity_my_q), KVADDR, &my_q,
                sizeof(ulong), "my_q", FAULT_ON_ERROR);
            if (my_q) {
                total += dump_tasks_in_cfs_rq(my_q);
                continue;
            }
        }

        /* container_of(node, task_struct, se.run_node) */
        tc = task_to_context((ulong)node - OFFSET(task_struct_se) -
                     OFFSET(sched_entity_run_node));
        if (!tc)
            continue;
        if (hq_enter((ulong)tc)) {
            INDENT(5);
            dump_task_runq_entry(tc, 0);
        } else {
            /* Cycle/duplicate protection via the hash queue. */
            error(WARNING, "duplicate CFS runqueue node: task %lx\n",
                tc->task);
            return total;
        }
        total++;
    }

    return total;
}

/*
 * Dump one task_group's cfs_rq for a cpu, recursing into child groups
 * and finishing with any dequeued descendants ("runq -g" path).
 */
static int
dump_tasks_in_task_group_cfs_rq(int depth, ulong cfs_rq, int cpu,
    struct task_context *ctc)
{
    struct task_context *tc;
    struct rb_root *root;
    struct rb_node *node;
    ulong my_q, leftmost, curr, curr_my_q, tg;
    int total, i;

    total = 0;
    curr_my_q = curr = 0;

    /* Non-root group: print its header, mark it shown. */
    if (depth) {
        readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
            &tg, sizeof(ulong), "cfs_rq tg", FAULT_ON_ERROR);
        for (i = 0; i < tgi_p; i++) {
            if (tgi_array[i]->task_group == tg) {
                print_group_header_fair(depth, cfs_rq,
                    tgi_array[i]);
                tgi_array[i]->use = 0;
                break;
            }
        }
    }

    if (VALID_MEMBER(sched_entity_my_q)) {
        readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr,
            sizeof(ulong), "curr", FAULT_ON_ERROR);
        if (curr) {
            readmem(curr + OFFSET(sched_entity_my_q), KVADDR,
                &curr_my_q, sizeof(ulong), "curr->my_q",
                FAULT_ON_ERROR);
            if (curr_my_q) {
                total++;
                total += dump_tasks_in_task_group_cfs_rq(depth + 1,
                    curr_my_q, cpu, ctc);
            }
        }
    }

    /*
     * check if "curr" is the task that is current running task
     */
    if (!curr_my_q && ctc && (curr - OFFSET(task_struct_se)) == ctc->task) {
        /* curr is not in the rb tree, so let's print it here */
        total++;
        INDENT(5 + 3 * depth);
        dump_task_runq_entry(ctc, 1);
    }

    readmem(cfs_rq + OFFSET(cfs_rq_rb_leftmost), KVADDR, &leftmost,
        sizeof(ulong), "rb_leftmost", FAULT_ON_ERROR);
    root = (struct rb_root *)(cfs_rq + OFFSET(cfs_rq_tasks_timeline));

    for (node = rb_first(root); leftmost && node; node = rb_next(node)) {
        if (VALID_MEMBER(sched_entity_my_q)) {
            readmem((ulong)node - OFFSET(sched_entity_run_node) +
                OFFSET(sched_entity_my_q), KVADDR, &my_q,
                sizeof(ulong), "my_q", FAULT_ON_ERROR);
            if (my_q) {
                total++;
                total += dump_tasks_in_task_group_cfs_rq(depth + 1,
                    my_q, cpu, ctc);
                continue;
            }
        }

        tc = task_to_context((ulong)node - OFFSET(task_struct_se) -
                     OFFSET(sched_entity_run_node));
        if (!tc)
            continue;
        if (hq_enter((ulong)tc)) {
            INDENT(5 + 3 * depth);
            dump_task_runq_entry(tc, 0);
        } else {
            error(WARNING, "duplicate CFS runqueue node: task %lx\n",
                tc->task);
            return total;
        }
        total++;
    }

    total += dump_tasks_in_lower_dequeued_cfs_rq(depth, cfs_rq, cpu, ctc);

    if (!total) {
        INDENT(5 + 3 * depth);
        fprintf(fp, "[no tasks queued]\n");
    }
    return total;
}

/*
 * "runq -d": display all tasks that are marked on_rq, grouped by cpu.
 */
static void
dump_on_rq_tasks(void)
{
    char buf[BUFSIZE];
    struct task_context *tc;
    int i, cpu, on_rq, tot;
    ulong *cpus;

    /* Lazily resolve the offsets this option needs. */
    if (!VALID_MEMBER(task_struct_on_rq)) {
        MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se");
        STRUCT_SIZE_INIT(sched_entity, "sched_entity");
        MEMBER_OFFSET_INIT(sched_entity_on_rq, "sched_entity", "on_rq");
        MEMBER_OFFSET_INIT(task_struct_on_rq, "task_struct", "on_rq");
        MEMBER_OFFSET_INIT(task_struct_prio, "task_struct", "prio");

        if (INVALID_MEMBER(task_struct_on_rq)) {
            if (INVALID_MEMBER(task_struct_se) ||
                INVALID_SIZE(sched_entity))
                option_not_supported('d');
        }
    }

    cpus = pc->curcmd_flags & CPUMASK ?
        (ulong *)(ulong)pc->curcmd_private : NULL;

    for (cpu = 0; cpu < kt->cpus; cpu++) {
        if (cpus && !NUM_IN_BITMAP(cpus, cpu))
            continue;

        fprintf(fp, "%sCPU %d", cpu ?
            "\n" : "", cpu);

        if (hide_offline_cpu(cpu)) {
            fprintf(fp, " [OFFLINE]\n");
            continue;
        } else
            fprintf(fp, "\n");

        /* Scan every known task for on_rq state on this cpu. */
        tc = FIRST_CONTEXT();
        tot = 0;
        for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
            if (VALID_MEMBER(task_struct_on_rq)) {
                readmem(tc->task + OFFSET(task_struct_on_rq),
                    KVADDR, &on_rq, sizeof(int),
                    "task on_rq", FAULT_ON_ERROR);
            } else {
                /* Older kernels keep on_rq in the sched_entity. */
                readmem(tc->task + OFFSET(task_struct_se), KVADDR,
                    buf, SIZE(sched_entity), "task se",
                    FAULT_ON_ERROR);
                on_rq = INT(buf + OFFSET(sched_entity_on_rq));
            }

            if (!on_rq || tc->processor != cpu)
                continue;

            INDENT(5);
            dump_task_runq_entry(tc, 0);
            tot++;
        }

        if (!tot) {
            INDENT(5);
            fprintf(fp, "[no tasks queued]\n");
        }
    }
}

/*
 * One-time resolution of the CFS/RT runqueue offsets and sizes.
 */
static void
cfs_rq_offset_init(void)
{
    if (!VALID_STRUCT(cfs_rq)) {
        STRUCT_SIZE_INIT(cfs_rq, "cfs_rq");
        STRUCT_SIZE_INIT(rt_rq, "rt_rq");
        MEMBER_OFFSET_INIT(rq_rt, "rq", "rt");
        MEMBER_OFFSET_INIT(rq_nr_running, "rq", "nr_running");
        MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se");
        STRUCT_SIZE_INIT(sched_entity, "sched_entity");
        MEMBER_OFFSET_INIT(sched_entity_run_node, "sched_entity",
            "run_node");
        MEMBER_OFFSET_INIT(sched_entity_cfs_rq, "sched_entity",
            "cfs_rq");
        MEMBER_OFFSET_INIT(sched_entity_my_q, "sched_entity",
            "my_q");
        MEMBER_OFFSET_INIT(sched_rt_entity_my_q, "sched_rt_entity",
            "my_q");
        MEMBER_OFFSET_INIT(sched_entity_on_rq, "sched_entity", "on_rq");
        MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost");
        MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running");
        MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq",
            "tasks_timeline");
        MEMBER_OFFSET_INIT(cfs_rq_curr, "cfs_rq", "curr");
        MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active");
        MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct",
            "run_list");
        MEMBER_OFFSET_INIT(task_struct_on_rq, "task_struct", "on_rq");
        MEMBER_OFFSET_INIT(task_struct_prio, "task_struct", "prio");
        MEMBER_OFFSET_INIT(task_struct_rt, "task_struct", "rt");
        MEMBER_OFFSET_INIT(sched_rt_entity_run_list, "sched_rt_entity",
            "run_list");
        MEMBER_OFFSET_INIT(rt_prio_array_queue,
            "rt_prio_array", "queue");
    }
}

/*
 * One-time resolution of the task_group / cgroup-naming offsets
 * used by "runq -g".
 */
static void
task_group_offset_init(void)
{
    if (!VALID_STRUCT(task_group)) {
        STRUCT_SIZE_INIT(task_group, "task_group");
        MEMBER_OFFSET_INIT(rt_rq_rt_nr_running, "rt_rq",
            "rt_nr_running");
        MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
        MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
        MEMBER_OFFSET_INIT(rt_rq_highest_prio, "rt_rq",
            "highest_prio");
        MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
        MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup,
            "cgroup_subsys_state", "cgroup");
        MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
        MEMBER_OFFSET_INIT(cgroup_kn, "cgroup", "kn");
        MEMBER_OFFSET_INIT(kernfs_node_name, "kernfs_node", "name");
        MEMBER_OFFSET_INIT(kernfs_node_parent, "kernfs_node",
            "parent");
        MEMBER_OFFSET_INIT(task_group_siblings, "task_group",
            "siblings");
        MEMBER_OFFSET_INIT(task_group_children, "task_group",
            "children");
        MEMBER_OFFSET_INIT(task_group_cfs_bandwidth,
            "task_group", "cfs_bandwidth");
        MEMBER_OFFSET_INIT(cfs_rq_throttled, "cfs_rq",
            "throttled");
        MEMBER_OFFSET_INIT(task_group_rt_bandwidth,
            "task_group", "rt_bandwidth");
        MEMBER_OFFSET_INIT(rt_rq_rt_throttled, "rt_rq",
            "rt_throttled");
    }
}

/*
 * Default "runq" display for CFS kernels: per cpu, show the current
 * task, the RT prio array, and the CFS rb tree contents.
 */
static void
dump_CFS_runqueues(void)
{
    int cpu, tot, displayed;
    ulong runq, cfs_rq, prio_array;
    char *runqbuf, *cfs_rq_buf;
    ulong tasks_timeline ATTRIBUTE_UNUSED;
    struct task_context *tc;
    struct rb_root *root;
    struct syment *rq_sp, *init_sp;
    ulong *cpus;

    cfs_rq_offset_init();

    if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
        error(FATAL, "per-cpu runqueues do not exist\n");

    runqbuf = GETBUF(SIZE(runqueue));
    /* Some kernels expose the root cfs_rq as its own per-cpu symbol. */
    if ((init_sp = per_cpu_symbol_search("per_cpu__init_cfs_rq")))
        cfs_rq_buf = GETBUF(SIZE(cfs_rq));
    else
        cfs_rq_buf = NULL;

    get_active_set();
    cpus = pc->curcmd_flags & CPUMASK ?
        (ulong *)(ulong)pc->curcmd_private : NULL;

    for (cpu = displayed = 0; cpu < kt->cpus; cpu++) {
        if (cpus && !NUM_IN_BITMAP(cpus, cpu))
            continue;

        if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
            runq = rq_sp->value + kt->__per_cpu_offset[cpu];
        else
            runq = rq_sp->value;

        fprintf(fp, "%sCPU %d ", displayed++ ? "\n" : "", cpu);

        if (hide_offline_cpu(cpu)) {
            fprintf(fp, "[OFFLINE]\n");
            continue;
        } else
            fprintf(fp, "RUNQUEUE: %lx\n", runq);

        fprintf(fp, " CURRENT: ");
        if ((tc = task_to_context(tt->active_set[cpu])))
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
        else
            fprintf(fp, "%lx\n", tt->active_set[cpu]);

        readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
            "per-cpu rq", FAULT_ON_ERROR);

        if (cfs_rq_buf) {
            /*
             * Use default task group's cfs_rq on each cpu.
             */
            if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
                cfs_rq = init_sp->value +
                    kt->__per_cpu_offset[cpu];
            else
                cfs_rq = init_sp->value;

            readmem(cfs_rq, KVADDR, cfs_rq_buf, SIZE(cfs_rq),
                "per-cpu cfs_rq", FAULT_ON_ERROR);
            root = (struct rb_root *)(cfs_rq +
                OFFSET(cfs_rq_tasks_timeline));
        } else {
            /* cfs_rq is embedded directly in the rq. */
            cfs_rq = runq + OFFSET(rq_cfs);
            root = (struct rb_root *)(runq + OFFSET(rq_cfs) +
                OFFSET(cfs_rq_tasks_timeline));
        }

        prio_array = runq + OFFSET(rq_rt) + OFFSET(rt_rq_active);
        fprintf(fp, " RT PRIO_ARRAY: %lx\n", prio_array);
        tot = dump_RT_prio_array(prio_array,
            &runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]);
        if (!tot) {
            INDENT(5);
            fprintf(fp, "[no tasks queued]\n");
        }

        fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root);
        hq_open();
        tot = dump_tasks_in_cfs_rq(cfs_rq);
        hq_close();
        if (!tot) {
            INDENT(5);
            fprintf(fp, "[no tasks queued]\n");
        }
    }

    FREEBUF(runqbuf);
    if (cfs_rq_buf)
        FREEBUF(cfs_rq_buf);
}

/*
 * Print the header line for one task_group's RT runqueue.
 */
static void
print_group_header_rt(ulong rt_rq, void *t)
{
    int throttled;
    struct task_group_info *tgi = (struct task_group_info *)t;

    fprintf(fp, "TASK_GROUP: %lx RT_RQ: %lx", tgi->task_group, rt_rq);
    if (tgi->name)
        fprintf(fp, " <%s>", tgi->name);

    if (VALID_MEMBER(task_group_rt_bandwidth)) {
        readmem(rt_rq +
            OFFSET(rt_rq_rt_throttled), KVADDR, &throttled,
            sizeof(int), "rt_rq rt_throttled", FAULT_ON_ERROR);
        if (throttled)
            fprintf(fp, " (THROTTLED)");
    }
    fprintf(fp, "\n");
}

/*
 * Recursively print the not-yet-shown ancestors of an RT task_group,
 * outermost first, marking each as shown (use = 0).
 */
static void
print_parent_task_group_rt(void *t, int cpu)
{
    int prio;
    struct task_group_info *tgi;
    ulong rt_rq_c, rt_rq_p;

    tgi = ((struct task_group_info *)t)->parent;
    if (tgi && tgi->use)
        /* NOTE(review): recursing into the "fair" variant from the RT
           variant looks like a copy-paste slip — confirm against
           upstream before changing. */
        print_parent_task_group_fair(tgi, cpu);
    else
        return;

    readmem(tgi->task_group + OFFSET(task_group_rt_rq), KVADDR,
        &rt_rq_c, sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
    /* task_group->rt_rq is a per-cpu pointer array; index by cpu. */
    readmem(rt_rq_c + cpu * sizeof(ulong), KVADDR, &rt_rq_p,
        sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);

    readmem(rt_rq_p + OFFSET(rt_rq_highest_prio), KVADDR, &prio,
        sizeof(int), "rt_rq highest prio", FAULT_ON_ERROR);
    INDENT(-1 + 6 * tgi->depth);
    fprintf(fp, "[%3d] ", prio);
    print_group_header_rt(rt_rq_p, tgi);
    tgi->use = 0;
}

/*
 * Recursively display child rt_rq's that were dequeued from this
 * rt_rq but still have queued RT tasks.
 */
static int
dump_tasks_in_lower_dequeued_rt_rq(int depth, ulong rt_rq, int cpu)
{
    int i, prio, tot, delta, nr_running;
    ulong rt_rq_c, rt_rq_p, group;

    tot = 0;
    for (i = 0; i < tgi_p; i++) {
        /* tgi_array[] is depth-sorted, so deeper entries end the scan. */
        delta = tgi_array[i]->depth - depth;
        if (delta > 1)
            break;
        if (tgi_array[i]->use == 0 || delta < 1)
            continue;

        readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR, &group,
            sizeof(ulong), "rt_rq tg", FAULT_ON_ERROR);
        if (group != tgi_array[i]->parent->task_group)
            continue;

        readmem(tgi_array[i]->task_group +
            OFFSET(task_group_rt_rq), KVADDR, &rt_rq_c,
            sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
        readmem(rt_rq_c + cpu * sizeof(ulong), KVADDR, &rt_rq_p,
            sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
        if (rt_rq == rt_rq_p)
            continue;

        readmem(rt_rq_p + OFFSET(rt_rq_rt_nr_running), KVADDR,
            &nr_running, sizeof(int), "rt_rq rt_nr_running",
            FAULT_ON_ERROR);
        if (nr_running == 0) {
            /* Empty here, but descendants may hold tasks. */
            tot += dump_tasks_in_lower_dequeued_rt_rq(depth + 1,
                rt_rq_p, cpu);
            continue;
        }

        print_parent_task_group_rt(tgi_array[i], cpu);
        readmem(rt_rq_p + OFFSET(rt_rq_highest_prio), KVADDR,
            &prio, sizeof(int), "rt_rq highest_prio",
            FAULT_ON_ERROR);
        INDENT(5 + 6 * depth);
        fprintf(fp, "[%3d] ", prio);
        tot++;
        dump_tasks_in_task_group_rt_rq(depth + 1, rt_rq_p, cpu);
    }
    return tot;
}

/*
 * Dump an RT prio array, recursing into group rt_rq's (rt_se->my_q).
 * k_prio_array is the kernel address; u_prio_array points at the
 * already-read local copy of the same data.
 */
static int
dump_RT_prio_array(ulong k_prio_array, char *u_prio_array)
{
    int i, c, tot, cnt, qheads;
    ulong offset, kvaddr, uvaddr;
    ulong list_head[2];
    struct list_data list_data, *ld;
    struct task_context *tc;
    ulong my_q, task_addr;
    char *rt_rq_buf;

    qheads = (i = ARRAY_LENGTH(rt_prio_array_queue)) ?
        i : get_array_length("rt_prio_array.queue", NULL,
        SIZE(list_head));

    ld = &list_data;

    for (i = tot = 0; i < qheads; i++) {
        offset = OFFSET(rt_prio_array_queue) + (i * SIZE(list_head));
        kvaddr = k_prio_array + offset;
        uvaddr = (ulong)u_prio_array + offset;
        /* Pull next/prev from the local copy, not kernel memory. */
        BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

        if (CRASHDEBUG(1))
            fprintf(fp, "rt_prio_array[%d] @ %lx => %lx/%lx\n",
                i, kvaddr, list_head[0], list_head[1]);

        /* Self-pointing list head: queue is empty. */
        if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
            continue;

        BZERO(ld, sizeof(struct list_data));
        ld->start = list_head[0];
        ld->flags |= LIST_ALLOCATE;
        if (VALID_MEMBER(task_struct_rt) &&
            VALID_MEMBER(sched_rt_entity_run_list))
            ld->list_head_offset = OFFSET(sched_rt_entity_run_list);
        else
            ld->list_head_offset = OFFSET(task_struct_run_list);
        ld->end = kvaddr;
        cnt = do_list(ld);

        for (c = 0; c < cnt; c++) {
            task_addr = ld->list_ptr[c];
            /* A group entity queues a child rt_rq, not a task. */
            if (VALID_MEMBER(sched_rt_entity_my_q)) {
                readmem(ld->list_ptr[c] +
                    OFFSET(sched_rt_entity_my_q), KVADDR,
                    &my_q, sizeof(ulong), "my_q",
                    FAULT_ON_ERROR);
                if (my_q) {
                    rt_rq_buf = GETBUF(SIZE(rt_rq));
                    readmem(my_q, KVADDR, rt_rq_buf,
                        SIZE(rt_rq), "rt_rq",
                        FAULT_ON_ERROR);
                    tot += dump_RT_prio_array(
                        my_q + OFFSET(rt_rq_active),
                        &rt_rq_buf[OFFSET(rt_rq_active)]);
                    FREEBUF(rt_rq_buf);
                    continue;
                }
            }
            /* container_of back to the task_struct. */
            if (VALID_MEMBER(task_struct_rt))
                task_addr -= OFFSET(task_struct_rt);
            else
                task_addr -= OFFSET(task_struct_run_list);
            if (!(tc = task_to_context(task_addr)))
                continue;

            INDENT(5);
            fprintf(fp, "[%3d] ", i);
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
            tot++;
        }
        FREEBUF(ld->list_ptr);
    }

    return tot;
}

/*
 * Dump one task_group's rt_rq for a cpu, recursing into child groups
 * and finishing with any dequeued descendants ("runq -g" path).
 */
static void
dump_tasks_in_task_group_rt_rq(int depth, ulong rt_rq, int cpu)
{
    int i, c, tot, cnt, qheads;
    ulong offset, kvaddr, uvaddr;
    ulong list_head[2];
    struct list_data list_data, *ld;
    struct task_context *tc;
    ulong my_q, task_addr, tg, k_prio_array;
    char *rt_rq_buf, *u_prio_array;

    k_prio_array = rt_rq + OFFSET(rt_rq_active);
    rt_rq_buf = GETBUF(SIZE(rt_rq));
    readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq), "rt_rq",
        FAULT_ON_ERROR);
    u_prio_array = &rt_rq_buf[OFFSET(rt_rq_active)];

    /* Non-root group: print its header, mark it shown. */
    if (depth) {
        readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR,
            &tg, sizeof(ulong), "rt_rq tg", FAULT_ON_ERROR);
        for (i = 0; i < tgi_p; i++) {
            if (tgi_array[i]->task_group == tg) {
                print_group_header_rt(rt_rq, tgi_array[i]);
                tgi_array[i]->use = 0;
                break;
            }
        }
    }

    qheads = (i = ARRAY_LENGTH(rt_prio_array_queue)) ?
        i : get_array_length("rt_prio_array.queue", NULL,
        SIZE(list_head));

    ld = &list_data;

    for (i = tot = 0; i < qheads; i++) {
        offset = OFFSET(rt_prio_array_queue) + (i * SIZE(list_head));
        kvaddr = k_prio_array + offset;
        uvaddr = (ulong)u_prio_array + offset;
        BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

        if (CRASHDEBUG(1))
            fprintf(fp, "rt_prio_array[%d] @ %lx => %lx/%lx\n",
                i, kvaddr, list_head[0], list_head[1]);

        if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
            continue;

        BZERO(ld, sizeof(struct list_data));
        ld->start = list_head[0];
        ld->flags |= LIST_ALLOCATE;
        if (VALID_MEMBER(task_struct_rt) &&
            VALID_MEMBER(sched_rt_entity_run_list))
            ld->list_head_offset = OFFSET(sched_rt_entity_run_list);
        else
            ld->list_head_offset = OFFSET(task_struct_run_list);
        ld->end = kvaddr;
        cnt = do_list(ld);

        for (c = 0; c < cnt; c++) {
            task_addr = ld->list_ptr[c];
            if (INVALID_MEMBER(sched_rt_entity_my_q))
                goto is_task;

            /* A group entity queues a child rt_rq; recurse. */
            readmem(ld->list_ptr[c] +
                OFFSET(sched_rt_entity_my_q), KVADDR,
                &my_q, sizeof(ulong), "my_q", FAULT_ON_ERROR);
            if (!my_q) {
                task_addr -= OFFSET(task_struct_rt);
                goto is_task;
            }
            INDENT(5 + 6 * depth);
            fprintf(fp, "[%3d] ", i);
            tot++;
            dump_tasks_in_task_group_rt_rq(depth + 1, my_q, cpu);
            continue;

is_task:
            if (!(tc = task_to_context(task_addr)))
                continue;

            INDENT(5 + 6 * depth);
            fprintf(fp, "[%3d] ", i);
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
            tot++;
        }
        FREEBUF(ld->list_ptr);
    }

    tot += dump_tasks_in_lower_dequeued_rt_rq(depth, rt_rq, cpu);

    if (!tot) {
        INDENT(5 + 6 * depth);
        fprintf(fp, "[no tasks queued]\n");
    }
    FREEBUF(rt_rq_buf);
}

/*
 * Return a GETBUF'd name string for a task_group's cgroup, resolved
 * either via cgroup->dentry (older kernels) or cgroup->kn (kernfs).
 * Returns NULL if no name can be determined; caller frees the result.
 */
static char *
get_task_group_name(ulong group)
{
    ulong cgroup, dentry, kernfs_node, parent, name;
    char *dentry_buf, *tmp;
    char buf[BUFSIZE];
    int len;

    tmp = NULL;
    readmem(group + OFFSET(task_group_css) +
        OFFSET(cgroup_subsys_state_cgroup), KVADDR, &cgroup,
        sizeof(ulong), "task_group css cgroup", FAULT_ON_ERROR);
    if (cgroup == 0)
        return NULL;

    if (VALID_MEMBER(cgroup_dentry)) {
        /* dentry-based path: copy the qstr name out of the dentry. */
        readmem(cgroup + OFFSET(cgroup_dentry), KVADDR, &dentry,
            sizeof(ulong), "cgroup dentry", FAULT_ON_ERROR);
        if (dentry == 0)
            return NULL;

        dentry_buf = GETBUF(SIZE(dentry));
        readmem(dentry, KVADDR, dentry_buf, SIZE(dentry),
            "dentry", FAULT_ON_ERROR);
        len = UINT(dentry_buf + OFFSET(dentry_d_name) +
            OFFSET(qstr_len));
        tmp = GETBUF(len + 1);
        name = ULONG(dentry_buf + OFFSET(dentry_d_name) +
            OFFSET(qstr_name));
        readmem(name, KVADDR, tmp, len, "qstr name", FAULT_ON_ERROR);
        FREEBUF(dentry_buf);
        return tmp;
    }

    /*
     * Emulate kernfs_name() and kernfs_name_locked()
     */
    if (INVALID_MEMBER(cgroup_kn) || INVALID_MEMBER(kernfs_node_name) ||
        INVALID_MEMBER(kernfs_node_parent))
        return NULL;

    readmem(cgroup + OFFSET(cgroup_kn), KVADDR, &kernfs_node,
        sizeof(ulong), "cgroup kn", FAULT_ON_ERROR);
    if (kernfs_node == 0)
        return NULL;

    readmem(kernfs_node + OFFSET(kernfs_node_parent), KVADDR, &parent,
        sizeof(ulong), "kernfs_node parent", FAULT_ON_ERROR);
    if (!parent) {
        /* The kernfs root is named "/". */
        tmp = GETBUF(2);
        strcpy(tmp, "/");
        return tmp;
    }

    readmem(kernfs_node + OFFSET(kernfs_node_name), KVADDR, &name,
        sizeof(ulong), "kernfs_node name", FAULT_ON_ERROR);
    if (!name || !read_string(name, buf, BUFSIZE-1))
        return
            NULL;

    tmp = GETBUF(strlen(buf)+1);
    strcpy(tmp, buf);
    return tmp;
}

/*
 * Depth-first walk of the task_group hierarchy, recording each group
 * in tgi_array[] (growing the array as needed).  "i" is the index of
 * the parent entry, or -1 for the root.  group_buf holds the already
 * read task_group and is reused for the children.
 */
static void
fill_task_group_info_array(int depth, ulong group, char *group_buf, int i)
{
    int d;
    ulong kvaddr, uvaddr, offset;
    ulong list_head[2], next;
    struct task_group_info **tgi_array_new;

    d = tgi_p;
    tgi_array[tgi_p] = (struct task_group_info *)
        GETBUF(sizeof(struct task_group_info));

    /* The root group (depth 0) is printed separately, so use = 0. */
    if (depth)
        tgi_array[tgi_p]->use = 1;
    else
        tgi_array[tgi_p]->use = 0;

    tgi_array[tgi_p]->depth = depth;
    tgi_array[tgi_p]->name = get_task_group_name(group);
    tgi_array[tgi_p]->task_group = group;
    if (i >= 0)
        tgi_array[tgi_p]->parent = tgi_array[i];
    else
        tgi_array[tgi_p]->parent = NULL;
    tgi_p++;

    /* Grow the pointer array when it fills up. */
    if (tgi_p == tgi_p_max) {
        tgi_p_max += MAX_GROUP_NUM;
        tgi_array_new = (struct task_group_info **)
            GETBUF(sizeof(void *) * tgi_p_max);
        BCOPY(tgi_array, tgi_array_new, sizeof(void *) * tgi_p);
        FREEBUF(tgi_array);
        tgi_array = tgi_array_new;
    }

    /* Recurse over group->children via the siblings list. */
    offset = OFFSET(task_group_children);
    kvaddr = group + offset;
    uvaddr = (ulong)(group_buf + offset);
    BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

    if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
        return;

    next = list_head[0];
    while (next != kvaddr) {
        group = next - OFFSET(task_group_siblings);
        readmem(group, KVADDR, group_buf, SIZE(task_group),
            "task_group", FAULT_ON_ERROR);
        next = ULONG(group_buf + OFFSET(task_group_siblings) +
            OFFSET(list_head_next));
        fill_task_group_info_array(depth + 1, group, group_buf, d);
    }
}

/*
 * "runq -g": display queued tasks per cpu, organized by task_group,
 * for both the RT and CFS runqueues.
 */
static void
dump_tasks_by_task_group(void)
{
    int cpu, displayed;
    ulong root_task_group, cfs_rq = 0, cfs_rq_p;
    ulong rt_rq = 0, rt_rq_p;
    char *buf;
    struct task_context *tc;
    char *task_group_name;
    ulong *cpus;

    cfs_rq_offset_init();
    task_group_offset_init();

    root_task_group = 0;
    task_group_name = NULL;
    /* The root group symbol was renamed across kernel versions. */
    if (symbol_exists("init_task_group")) {
        root_task_group = symbol_value("init_task_group");
        task_group_name = "INIT";
    } else if (symbol_exists("root_task_group")) {
        root_task_group = symbol_value("root_task_group");
        task_group_name = "ROOT";
    } else
        error(FATAL, "cannot determine root task_group\n");

    tgi_p_max = MAX_GROUP_NUM;
    tgi_array = (struct task_group_info **)GETBUF(sizeof(void *)
        * tgi_p_max);

    buf = GETBUF(SIZE(task_group));
    readmem(root_task_group, KVADDR, buf, SIZE(task_group),
        "task_group", FAULT_ON_ERROR);
    /* Per-cpu pointer arrays hanging off the root group. */
    if (VALID_MEMBER(task_group_rt_rq))
        rt_rq = ULONG(buf + OFFSET(task_group_rt_rq));
    if (VALID_MEMBER(task_group_cfs_rq))
        cfs_rq = ULONG(buf + OFFSET(task_group_cfs_rq));

    fill_task_group_info_array(0, root_task_group, buf, -1);
    sort_task_group_info_array();
    if (CRASHDEBUG(1))
        print_task_group_info_array();

    get_active_set();
    cpus = pc->curcmd_flags & CPUMASK ?
        (ulong *)(ulong)pc->curcmd_private : NULL;

    for (cpu = displayed = 0; cpu < kt->cpus; cpu++) {
        if (cpus && !NUM_IN_BITMAP(cpus, cpu))
            continue;

        if (rt_rq)
            readmem(rt_rq + cpu * sizeof(ulong), KVADDR,
                &rt_rq_p, sizeof(ulong), "task_group rt_rq",
                FAULT_ON_ERROR);
        if (cfs_rq)
            readmem(cfs_rq + cpu * sizeof(ulong), KVADDR,
                &cfs_rq_p, sizeof(ulong), "task_group cfs_rq",
                FAULT_ON_ERROR);

        fprintf(fp, "%sCPU %d", displayed++ ? "\n" : "", cpu);

        if (hide_offline_cpu(cpu)) {
            fprintf(fp, " [OFFLINE]\n");
            continue;
        } else
            fprintf(fp, "\n");

        fprintf(fp, " CURRENT: ");
        if ((tc = task_to_context(tt->active_set[cpu])))
            fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
                tc->pid, tc->task, tc->comm);
        else
            fprintf(fp, "%lx\n", tt->active_set[cpu]);

        if (rt_rq) {
            fprintf(fp, " %s_TASK_GROUP: %lx RT_RQ: %lx\n",
                task_group_name, root_task_group, rt_rq_p);
            reuse_task_group_info_array();
            dump_tasks_in_task_group_rt_rq(0, rt_rq_p, cpu);
        }

        if (cfs_rq) {
            fprintf(fp, " %s_TASK_GROUP: %lx CFS_RQ: %lx\n",
                task_group_name, root_task_group, cfs_rq_p);
            reuse_task_group_info_array();
            dump_tasks_in_task_group_cfs_rq(0, cfs_rq_p, cpu, tc);
        }
    }

    FREEBUF(buf);
    free_task_group_info_array();
}

#undef _NSIG
#define _NSIG 64
#define _NSIG_BPW machdep->bits
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)

#undef SIGRTMIN
#define SIGRTMIN 32

/* Signal number to name (and alternate name) table; index == signo. */
static struct signame {
    char *name;
    char *altname;
} signame[_NSIG] = {
    /* 0 */ {NULL, NULL},
    /* 1 */ {"SIGHUP", NULL},
    /* 2 */ {"SIGINT", NULL},
    /* 3 */ {"SIGQUIT", NULL},
    /* 4 */ {"SIGILL", NULL},
    /* 5 */ {"SIGTRAP", NULL},
    /* 6 */ {"SIGABRT", "SIGIOT"},
    /* 7 */ {"SIGBUS", NULL},
    /* 8 */ {"SIGFPE", NULL},
    /* 9 */ {"SIGKILL", NULL},
    /* 10 */ {"SIGUSR1", NULL},
    /* 11 */ {"SIGSEGV", NULL},
    /* 12 */ {"SIGUSR2", NULL},
    /* 13 */ {"SIGPIPE", NULL},
    /* 14 */ {"SIGALRM", NULL},
    /* 15 */ {"SIGTERM", NULL},
    /* 16 */ {"SIGSTKFLT", NULL},
    /* 17 */ {"SIGCHLD", "SIGCLD"},
    /* 18 */ {"SIGCONT", NULL},
    /* 19 */ {"SIGSTOP", NULL},
    /* 20 */ {"SIGTSTP", NULL},
    /* 21 */ {"SIGTTIN", NULL},
    /* 22 */ {"SIGTTOU", NULL},
    /* 23 */ {"SIGURG", NULL},
    /* 24 */ {"SIGXCPU", NULL},
    /* 25 */ {"SIGXFSZ", NULL},
    /* 26 */ {"SIGVTALRM", NULL},
    /* 27 */ {"SIGPROF", NULL},
    /* 28 */ {"SIGWINCH", NULL},
    /* 29 */ {"SIGIO", "SIGPOLL"},
    /* 30 */ {"SIGPWR", NULL},
    /* 31 */ {"SIGSYS", "SIGUNUSED"},
    {NULL, NULL},    /* Real time signals start here.
*/ };	/* end of signame[]: real time signal names are generated at runtime */

/*
 * Return the largest signal number supported by this kernel; kernels
 * prior to 2.5 used _NSIG-1.  If both min and max are passed in, they
 * are filled with the counts used to label the real time range as
 * "SIGRTMIN+n" and "SIGRTMAX-n" respectively.
 */
static int
sigrt_minmax(int *min, int *max)
{
	int sigrtmax, j;

	sigrtmax = THIS_KERNEL_VERSION < LINUX(2,5,0) ?
		_NSIG - 1 : _NSIG;

	if (min && max) {
		j = sigrtmax-SIGRTMIN-1;
		*max = j / 2;
		*min = j - *max;
	}

	return sigrtmax;
}

/*
 * Display the list of known signal names ("sig -l"), including the
 * runtime-generated SIGRTMIN+n/SIGRTMAX-n names.
 */
static void
signame_list(void)
{
	int i, sigrtmax, j, min, max;

	sigrtmax = sigrt_minmax(&min, &max);
	j = 1;

	for (i = 1; i <= sigrtmax; i++) {
		if ((i == SIGRTMIN) || (i == sigrtmax)) {
			fprintf(fp, "[%d] %s", i,
				(i== SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
		} else if (i > SIGRTMIN) {
			/* real time range: count up from SIGRTMIN, then down to SIGRTMAX */
			if (j <= min){
				fprintf(fp, "[%d] %s%d", i , "SIGRTMIN+", j);
				j++;
			} else if (max >= 1) {
				fprintf(fp, "[%d] %s%d", i , "SIGRTMAX-",max);
				max--;
			}
		} else {
			if (!signame[i].name)
				continue;
			fprintf(fp, "%s[%d] %s", i < 10 ? " " : "",
				i, signame[i].name);
			if (signame[i].altname)
				fprintf(fp, "/%s", signame[i].altname);
		}
		fprintf(fp, "\n");
	}
}

/*
 * Translate the bits in a signal set into their name strings.
 */
static void
translate_sigset(ulonglong sigset)
{
	int sigrtmax, min, max, i, j, c, len;
	char buf[BUFSIZE];

	if (!sigset) {
		fprintf(fp, "(none)\n");
		return;
	}

	len = 0;
	sigrtmax= sigrt_minmax(&min, &max);
	j = 1;

	/* walk the set one bit at a time, shifting sigset right each pass */
	for (i = 1, c = 0; i <= sigrtmax; i++) {
		if (sigset & (ulonglong)1) {
			if (i == SIGRTMIN || i == sigrtmax)
				sprintf(buf, "%s%s", c++ ? " " : "",
					(i==SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
			else if (i > SIGRTMIN) {
				if (j <= min)
					sprintf(buf, "%s%s%d", c++ ? " " : "",
						"SIGRTMIN+", j);
				else if (max >= 1)
					sprintf(buf, "%s%s%d", c++ ? " " : "",
						"SIGRTMAX-", max);
			} else
				sprintf(buf, "%s%s", c++ ? " " : "",
					signame[i].name);

			/* wrap the output at roughly 80 columns */
			if ((len + strlen(buf)) > 80) {
				shift_string_left(buf, 1);
				fprintf(fp, "\n");
				len = 0;
			}
			len += strlen(buf);
			fprintf(fp, "%s", buf);
		}
		sigset >>= 1;

		/* keep the SIGRTMIN+/SIGRTMAX- counters in step with i */
		if (i > SIGRTMIN) {
			if (j <= min)
				j++;
			else if (max >= 1)
				max--;
		}
	}
	fprintf(fp, "\n");
}

/*
 * Machine dependent interface to modify signame struct contents.
*/
void
modify_signame(int sig, char *name, char *altname)
{
	signame[sig].name = name;
	signame[sig].altname = altname;
}

/*
 * Display all signal-handling data for a task.
 *
 * Reference handling framework is here, but not used as of yet.
 */
void
cmd_sig(void)
{
	int c, tcnt, bogus;
	ulong value;
	ulonglong sigset;
	struct reference *ref;
	struct task_context *tc;
	ulong *tasklist;
	char *siglist;
	int thread_group = FALSE;

	/* GETBUF allocations are reclaimed by the command framework */
	tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong));
	ref = (struct reference *)GETBUF(sizeof(struct reference));
	siglist = GETBUF(BUFSIZE);
	ref->str = siglist;

	while ((c = getopt(argcnt, args, "lR:s:g")) != EOF) {
		switch(c)
		{
		case 's':	/* translate a hexadecimal sigset and return */
			sigset = htoll(optarg, FAULT_ON_ERROR, NULL);
			translate_sigset(sigset);
			return;

		case 'R':	/* accumulate comma-separated reference strings */
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, optarg);
			break;

		case 'l':	/* list all signal names and return */
			signame_list();
			return;

		case 'g':	/* report at the thread group level */
			pc->curcmd_flags |= TASK_SPECIFIED;
			thread_group = TRUE;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	tcnt = bogus = 0;

	/* gather the task list from the remaining PID/task arguments */
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
				/* a PID may map to several contexts (threads) */
				for (tc = pid_to_context(value); tc;
				     tc = tc->tc_next)
					tasklist[tcnt++] = tc->task;
				break;

			case STR_TASK:
				tasklist[tcnt++] = value;
				break;

			case STR_INVALID:
				bogus++;
				error(INFO, "invalid task or pid value: %s\n\n",
					args[optind]);
				break;
			}
		} else if (strstr(args[optind], ",") ||
			MEMBER_EXISTS("task_struct", args[optind])) {
			/* non-numeric argument: treat as -R reference string */
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, args[optind]);
		} else
			error(INFO, "invalid task or pid value: %s\n\n",
				args[optind]);
		optind++;
	}

	if (!tcnt && !bogus)
		tasklist[tcnt++] = CURRENT_TASK();	/* default to current task */

	for (c = 0; c < tcnt; c++) {
		if (thread_group)
			do_sig_thread_group(tasklist[c]);
		else {
			do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL);
			fprintf(fp, "\n");
		}
	}
}

/*
 * Do the work for the "sig -g" command option, coming from sig or foreach.
*/
static void
do_sig_thread_group(ulong task)
{
	int i;
	int cnt;
	struct task_context *tc;
	ulong tgid;

	tc = task_to_context(task);
	tgid = task_tgid(task);

	/*
	 * If this is not the thread group leader, switch to the leader
	 * when the task was explicitly specified; otherwise return and
	 * let the group be reported when the leader is reached.
	 */
	if (tc->pid != tgid) {
		if (pc->curcmd_flags & TASK_SPECIFIED) {
			if (!(tc = tgid_to_context(tgid)))
				return;
			task = tc->task;
		} else
			return;
	}

	/* show the idle task's group only once per command invocation */
	if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN))
		return;

	/* group-level data first, then the leader's task-level data */
	print_task_header(fp, tc, 0);
	dump_signal_data(tc, THREAD_GROUP_LEVEL);
	fprintf(fp, "\n ");
	print_task_header(fp, tc, 0);
	dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);

	/* then every other member of the thread group */
	tc = FIRST_CONTEXT();
	for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->task == task)
			continue;
		if (task_tgid(tc->task) == tgid) {
			fprintf(fp, "\n ");
			print_task_header(fp, tc, 0);
			dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);
			cnt++;
			if (tc->pid == 0)
				pc->curcmd_flags |= IDLE_TASK_SHOWN;
		}
	}

	fprintf(fp, "\n");
}

/*
 * Do the work for the sig command, coming from sig or foreach.
 */
void
do_sig(ulong task, ulong flags, struct reference *ref)
{
	struct task_context *tc;

	tc = task_to_context(task);

	if (ref)
		signal_reference(tc, flags, ref);
	else {
		/* foreach has already printed the task header */
		if (!(flags & FOREACH_SIG))
			print_task_header(fp, tc, 0);
		dump_signal_data(tc, TASK_LEVEL|THREAD_GROUP_LEVEL);
	}
}

/*
 * Implementation for -R reference for the sig command.
 */
static void
signal_reference(struct task_context *tc, ulong flags, struct reference *ref)
{
	if (flags & FOREACH_SIG)
		error(FATAL, "sig: -R not supported yet\n");
	else
		error(FATAL, "-R not supported yet\n");
}

/*
 * Dump all signal-handling data for a task.
*/ static void dump_signal_data(struct task_context *tc, ulong flags) { int i, sigrtmax, others, use_sighand; int translate, sigpending; uint ti_flags; ulonglong sigset, blocked, mask; ulong signal_struct, kaddr, handler, sa_flags, sigqueue; ulong sighand_struct; long size; char *signal_buf, *uaddr; ulong shared_pending, signal; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; sigpending = sigqueue = 0; sighand_struct = signal_struct = 0; if (VALID_STRUCT(sigqueue) && !VALID_MEMBER(sigqueue_next)) { MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next"); MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list"); MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info"); } else if (!VALID_MEMBER(signal_queue_next)) { MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next"); MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info"); } sigset = task_signal(tc->task, 0); if (!tt->last_task_read) return; if (VALID_MEMBER(task_struct_sig)) signal_struct = ULONG(tt->task_struct + OFFSET(task_struct_sig)); else if (VALID_MEMBER(task_struct_signal)) signal_struct = ULONG(tt->task_struct + OFFSET(task_struct_signal)); size = MAX(SIZE(signal_struct), VALID_SIZE(signal_queue) ? SIZE(signal_queue) : SIZE(sigqueue)); if (VALID_SIZE(sighand_struct)) size = MAX(size, SIZE(sighand_struct)); signal_buf = GETBUF(size); if (signal_struct) readmem(signal_struct, KVADDR, signal_buf, SIZE(signal_struct), "signal_struct buffer", FAULT_ON_ERROR); /* * Signal dispositions (thread group level). 
*/ if (flags & THREAD_GROUP_LEVEL) { if (flags & TASK_INDENT) INDENT(2); fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct); if (!signal_struct) { fprintf(fp, "\n"); return; } if (VALID_MEMBER(signal_struct_count)) fprintf(fp, "COUNT: %d\n", INT(signal_buf + OFFSET(signal_struct_count))); else if (VALID_MEMBER(signal_struct_nr_threads)) fprintf(fp, "NR_THREADS: %d\n", INT(signal_buf + OFFSET(signal_struct_nr_threads))); else fprintf(fp, "\n"); if (flags & TASK_INDENT) INDENT(2); fprintf(fp, " SIG %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN, CENTER, "SIGACTION"), mkstring(buf2, UVADDR_PRLEN, RJUST, "HANDLER"), mkstring(buf3, 16, CENTER, "MASK"), mkstring(buf4, VADDR_PRLEN, LJUST, "FLAGS")); if (VALID_MEMBER(task_struct_sighand)) { sighand_struct = ULONG(tt->task_struct + OFFSET(task_struct_sighand)); readmem(sighand_struct, KVADDR, signal_buf, SIZE(sighand_struct), "sighand_struct buffer", FAULT_ON_ERROR); use_sighand = TRUE; } else use_sighand = FALSE; sigrtmax = sigrt_minmax(NULL, NULL); for (i = 1; i <= sigrtmax; i++) { if (flags & TASK_INDENT) INDENT(2); fprintf(fp, "%s[%d] ", i < 10 ? 
" " : "", i); if (use_sighand) { kaddr = sighand_struct + OFFSET(sighand_struct_action) + ((i-1) * SIZE(k_sigaction)); uaddr = signal_buf + OFFSET(sighand_struct_action) + ((i-1) * SIZE(k_sigaction)); } else { kaddr = signal_struct + OFFSET(signal_struct_action) + ((i-1) * SIZE(k_sigaction)); uaddr = signal_buf + OFFSET(signal_struct_action) + ((i-1) * SIZE(k_sigaction)); } handler = ULONG(uaddr + OFFSET(sigaction_sa_handler)); switch ((long)handler) { case -1: mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR"); break; case 0: mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL"); break; case 1: mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN"); break; default: mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(handler)); break; } mask = sigaction_mask((ulong)uaddr); sa_flags = ULONG(uaddr + OFFSET(sigaction_sa_flags)); fprintf(fp, "%s%s %s %016llx %lx ", space(MINSPACE-1), mkstring(buf2, UVADDR_PRLEN,LJUST|LONG_HEX,MKSTR(kaddr)), buf1, mask, sa_flags); if (sa_flags) { others = 0; translate = 1; if (sa_flags & SA_NOCLDSTOP) fprintf(fp, "%s%sSA_NOCLDSTOP", translate-- > 0 ? "(" : "", others++ ? "|" : ""); #ifdef SA_RESTORER if (sa_flags & SA_RESTORER) fprintf(fp, "%s%sSA_RESTORER", translate-- > 0 ? "(" : "", others++ ? "|" : ""); #endif #ifdef SA_NOCLDWAIT if (sa_flags & SA_NOCLDWAIT) fprintf(fp, "%s%sSA_NOCLDWAIT", translate-- > 0 ? "(" : "", others++ ? "|" : ""); #endif if (sa_flags & SA_SIGINFO) fprintf(fp, "%s%sSA_SIGINFO", translate-- > 0 ? "(" : "", others++ ? "|" : ""); if (sa_flags & SA_ONSTACK) fprintf(fp, "%s%sSA_ONSTACK", translate-- > 0 ? "(" : "", others++ ? "|" : ""); if (sa_flags & SA_RESTART) fprintf(fp, "%s%sSA_RESTART", translate-- > 0 ? "(" : "", others++ ? "|" : ""); if (sa_flags & SA_NODEFER) fprintf(fp, "%s%sSA_NODEFER", translate-- > 0 ? "(" : "", others++ ? "|" : ""); if (sa_flags & SA_RESETHAND) fprintf(fp, "%s%sSA_RESETHAND", translate-- > 0 ? "(" : "", others++ ? 
"|" : ""); if (translate < 1) fprintf(fp, ")"); } fprintf(fp, "\n"); } } if (flags & TASK_LEVEL) { /* * Pending signals (task level). */ if (VALID_MEMBER(task_struct_sigpending)) sigpending = INT(tt->task_struct + OFFSET(task_struct_sigpending)); else if (VALID_MEMBER(thread_info_flags)) { fill_thread_info(tc->thread_info); ti_flags = UINT(tt->thread_info + OFFSET(thread_info_flags)); sigpending = ti_flags & (1<task); if (flags & TASK_INDENT) INDENT(2); fprintf(fp, " BLOCKED: %016llx\n", blocked); /* * Pending queue (task level). */ if (flags & TASK_INDENT) INDENT(2); if (VALID_MEMBER(signal_struct_shared_pending)) { fprintf(fp, "PRIVATE_PENDING\n"); if (flags & TASK_INDENT) INDENT(2); } fprintf(fp, " SIGNAL: %016llx\n", sigset); if (VALID_MEMBER(task_struct_sigqueue)) sigqueue = ULONG(tt->task_struct + OFFSET(task_struct_sigqueue)); else if (VALID_MEMBER(task_struct_pending)) sigqueue = ULONG(tt->task_struct + OFFSET(task_struct_pending) + OFFSET_OPTION(sigpending_head, sigpending_list)); if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) sigqueue = 0; if (flags & TASK_INDENT) INDENT(2); if (sigqueue) { fprintf(fp, " SIGQUEUE: SIG %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); sigqueue_list(sigqueue); } else fprintf(fp, " SIGQUEUE: (empty)\n"); } /* * Pending queue (thread group level). 
*/ if ((flags & THREAD_GROUP_LEVEL) && VALID_MEMBER(signal_struct_shared_pending)) { fprintf(fp, "SHARED_PENDING\n"); shared_pending = signal_struct + OFFSET(signal_struct_shared_pending); signal = shared_pending + OFFSET(sigpending_signal); readmem(signal, KVADDR, signal_buf,SIZE(sigpending_signal), "signal", FAULT_ON_ERROR); sigset = task_signal(0, (ulong*)signal_buf); if (flags & TASK_INDENT) INDENT(2); fprintf(fp, " SIGNAL: %016llx\n", sigset); sigqueue = (shared_pending + OFFSET_OPTION(sigpending_head, sigpending_list) + OFFSET(list_head_next)); readmem(sigqueue,KVADDR, signal_buf, SIZE(sigqueue), "sigqueue", FAULT_ON_ERROR); sigqueue = ULONG(signal_buf); if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue)) sigqueue = 0; if (flags & TASK_INDENT) INDENT(2); if (sigqueue) { fprintf(fp, " SIGQUEUE: SIG %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SIGINFO")); sigqueue_list(sigqueue); } else fprintf(fp, " SIGQUEUE: (empty)\n"); } FREEBUF(signal_buf); } /* * Dump a pending signal queue (private/shared). */ static void sigqueue_list(ulong sigqueue) { ulong sigqueue_save, next; int sig; char *signal_buf; long size; size = VALID_SIZE(signal_queue) ? 
SIZE(signal_queue) : SIZE(sigqueue); signal_buf = GETBUF(size); sigqueue_save = sigqueue; while (sigqueue) { readmem(sigqueue, KVADDR, signal_buf, SIZE_OPTION(signal_queue, sigqueue), "signal_queue/sigqueue", FAULT_ON_ERROR); if (VALID_MEMBER(signal_queue_next) && VALID_MEMBER(signal_queue_info)) { next = ULONG(signal_buf + OFFSET(signal_queue_next)); sig = INT(signal_buf + OFFSET(signal_queue_info) + OFFSET(siginfo_si_signo)); } else { next = ULONG(signal_buf + OFFSET_OPTION(sigqueue_next, sigqueue_list)); sig = INT(signal_buf + OFFSET(sigqueue_info) + OFFSET(siginfo_si_signo)); } if (sigqueue_save == next) break; fprintf(fp, " %3d %lx\n", sig, sigqueue + OFFSET_OPTION(signal_queue_info, sigqueue_info)); sigqueue = next; } FREEBUF(signal_buf); } /* * Return the current set of signals sent to a task, in the form of * a long long data type form that can be easily masked regardless * of its size. */ static ulonglong task_signal(ulong task, ulong *signal) { ulong *sigset_ptr; ulonglong sigset = 0; if (task) { fill_task_struct(task); if (!tt->last_task_read) return 0; if (VALID_MEMBER(sigpending_signal)) { sigset_ptr = (ulong *)(tt->task_struct + OFFSET(task_struct_pending) + OFFSET(sigpending_signal)); } else if (VALID_MEMBER(task_struct_signal)) { sigset_ptr = (ulong *)(tt->task_struct + OFFSET(task_struct_signal)); } else return 0; } else if (signal) { sigset_ptr = signal; } else return 0; switch (_NSIG_WORDS) { case 1: sigset = (ulonglong)sigset_ptr[0]; break; case 2: sigset = (ulonglong)(sigset_ptr[1]) << 32; sigset |= (ulonglong)(sigset_ptr[0]); break; } return sigset; } /* * Return the current set of signals that a task has blocked, in the form * of a long long data type form that can be easily masked regardless * of its size. 
*/ static ulonglong task_blocked(ulong task) { ulonglong sigset; ulong *sigset_ptr; fill_task_struct(task); if (!tt->last_task_read) return 0; sigset_ptr = (ulong *)(tt->task_struct + OFFSET(task_struct_blocked)); sigset = (ulonglong)(sigset_ptr[1]) << 32; sigset |= (ulonglong)(sigset_ptr[0]); return sigset; } static ulonglong sigaction_mask(ulong sigaction) { ulonglong sigset; ulong *sigset_ptr; sigset = 0; sigset_ptr = (ulong *)(sigaction + OFFSET(sigaction_sa_mask)); switch (_NSIG_WORDS) { case 1: sigset = (ulonglong)sigset_ptr[0]; break; case 2: sigset = (ulonglong)(sigset_ptr[1]) << 32; sigset |= (ulonglong)(sigset_ptr[0]); break; } return sigset; } /* * Deal with potential separation of task_struct and kernel stack. */ ulong generic_get_stackbase(ulong task) { return task_to_stackbase(task); } ulong generic_get_stacktop(ulong task) { return task_to_stackbase(task) + STACKSIZE(); } crash-7.1.4/unwind_decoder.c0000664000000000000000000003156112634305150014465 0ustar rootroot/* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang */ /* * unwind_decoder.c * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * arch/ia64/kernel/unwind_decoder.c (kernel-2.4.18-6.23) */ /* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang * * Generic IA-64 unwind info decoder. * * This file is used both by the Linux kernel and objdump. 
Please keep * the two copies of this file in sync. * * You need to customize the decoder by defining the following * macros/constants before including this file: * * Types: * unw_word Unsigned integer type with at least 64 bits * * Register names: * UNW_REG_BSP * UNW_REG_BSPSTORE * UNW_REG_FPSR * UNW_REG_LC * UNW_REG_PFS * UNW_REG_PR * UNW_REG_RNAT * UNW_REG_PSP * UNW_REG_RP * UNW_REG_UNAT * * Decoder action macros: * UNW_DEC_BAD_CODE(code) * UNW_DEC_ABI(fmt,abi,context,arg) * UNW_DEC_BR_GR(fmt,brmask,gr,arg) * UNW_DEC_BR_MEM(fmt,brmask,arg) * UNW_DEC_COPY_STATE(fmt,label,arg) * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) * UNW_DEC_FR_MEM(fmt,frmask,arg) * UNW_DEC_GR_GR(fmt,grmask,gr,arg) * UNW_DEC_GR_MEM(fmt,grmask,arg) * UNW_DEC_LABEL_STATE(fmt,label,arg) * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) * UNW_DEC_MEM_STACK_V(fmt,t,arg) * UNW_DEC_PRIUNAT_GR(fmt,r,arg) * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) * UNW_DEC_REG_REG(fmt,src,dst,arg) * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) * UNW_DEC_REG_WHEN(fmt,reg,t,arg) * UNW_DEC_RESTORE(fmt,t,abreg,arg) * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) */ static unw_word unw_decode_uleb128 (unsigned char **dpp) { unsigned shift = 0; unw_word byte, result = 0; unsigned char *bp = *dpp; while (1) { byte = *bp++; result |= (byte & 0x7f) << shift; if ((byte & 0x80) == 0) break; shift += 7; } *dpp = bp; 
return result;
}

/*
 * X1 descriptor: spill of an abreg to a sp- or psp-relative location.
 */
static unsigned char *
unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, abreg;
	unw_word t, off;

	byte1 = *dp++;
	t = unw_decode_uleb128 (&dp);
	off = unw_decode_uleb128 (&dp);
	abreg = (byte1 & 0x7f);
	/* high bit of byte1 selects sp-relative vs psp-relative */
	if (byte1 & 0x80)
		UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
	else
		UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
	return dp;
}

/*
 * X2 descriptor: spill of an abreg to a register, or a restore.
 */
static unsigned char *
unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, abreg, x, ytreg;
	unw_word t;

	byte1 = *dp++;
	byte2 = *dp++;
	t = unw_decode_uleb128 (&dp);
	abreg = (byte1 & 0x7f);
	ytreg = byte2;
	x = (byte1 >> 7) & 1;
	/* an all-zero target encodes a restore rather than a spill */
	if ((byte1 & 0x80) == 0 && ytreg == 0)
		UNW_DEC_RESTORE(X2, t, abreg, arg);
	else
		UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
	return dp;
}

/*
 * X3 descriptor: predicated (qp) version of X1.
 */
static unsigned char *
unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, abreg, qp;
	unw_word t, off;

	byte1 = *dp++;
	byte2 = *dp++;
	t = unw_decode_uleb128 (&dp);
	off = unw_decode_uleb128 (&dp);

	qp = (byte1 & 0x3f);
	abreg = (byte2 & 0x7f);

	if (byte1 & 0x80)
		UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
	else
		UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
	return dp;
}

/*
 * X4 descriptor: predicated (qp) version of X2.
 */
static unsigned char *
unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
	unw_word t;

	byte1 = *dp++;
	byte2 = *dp++;
	byte3 = *dp++;
	t = unw_decode_uleb128 (&dp);

	qp = (byte1 & 0x3f);
	abreg = (byte2 & 0x7f);
	x = (byte2 >> 7) & 1;
	ytreg = byte3;

	if ((byte2 & 0x80) == 0 && byte3 == 0)
		UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
	else
		UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
	return dp;
}

/*
 * R1 descriptor: short prologue/body region, length in the low 5 bits.
 */
static unsigned char *
unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
{
	int body = (code & 0x20) != 0;
	unw_word rlen;

	rlen = (code & 0x1f);
	UNW_DEC_PROLOGUE(R1, body, rlen, arg);
	return dp;
}

static unsigned char *
unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1,
mask, grsave; unw_word rlen; byte1 = *dp++; mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); grsave = (byte1 & 0x7f); rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg); return dp; } static unsigned char * unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg) { unw_word rlen; rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg); return dp; } static unsigned char * unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char brmask = (code & 0x1f); UNW_DEC_BR_MEM(P1, brmask, arg); return dp; } static unsigned char * unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg) { if ((code & 0x10) == 0) { unsigned char byte1 = *dp++; UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1), (byte1 & 0x7f), arg); } else if ((code & 0x08) == 0) { unsigned char byte1 = *dp++, r, dst; r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); dst = (byte1 & 0x7f); switch (r) { case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break; case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break; case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break; case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break; case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break; case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break; case 6: UNW_DEC_RP_BR(P3, dst, arg); break; case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break; case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break; case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break; case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break; case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else if ((code & 0x7) == 0) UNW_DEC_SPILL_MASK(P4, dp, arg); else if ((code & 0x7) == 1) { unw_word grmask, frmask, byte1, byte2, byte3; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; grmask = ((byte1 >> 4) & 0xf); frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3; UNW_DEC_FRGR_MEM(P5, grmask, frmask, 
arg); } else UNW_DEC_BAD_CODE(code); return dp; } static unsigned char * unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg) { int gregs = (code & 0x10) != 0; unsigned char mask = (code & 0x0f); if (gregs) UNW_DEC_GR_MEM(P6, mask, arg); else UNW_DEC_FR_MEM(P6, mask, arg); return dp; } static unsigned char * unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg) { unsigned char r, byte1, byte2; unw_word t, size; if ((code & 0x10) == 0) { r = (code & 0xf); t = unw_decode_uleb128 (&dp); switch (r) { case 0: size = unw_decode_uleb128 (&dp); UNW_DEC_MEM_STACK_F(P7, t, size, arg); break; case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break; case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break; case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break; case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break; case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break; case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break; case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break; case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break; case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break; case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break; case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break; case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break; case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break; case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else { switch (code & 0xf) { case 0x0: /* p8 */ { r = *dp++; t = unw_decode_uleb128 (&dp); switch (r) { case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break; case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break; case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break; case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break; case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break; case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break; case 7: 
UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break; case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break; case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break; case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break; case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break; case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break; case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break; case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break; case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break; case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } break; case 0x1: byte1 = *dp++; byte2 = *dp++; UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg); break; case 0xf: /* p10 */ byte1 = *dp++; byte2 = *dp++; UNW_DEC_ABI(P10, byte1, byte2, arg); break; case 0x9: return unw_decode_x1 (dp, code, arg); case 0xa: return unw_decode_x2 (dp, code, arg); case 0xb: return unw_decode_x3 (dp, code, arg); case 0xc: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } } return dp; } static unsigned char * unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg) { unw_word label = (code & 0x1f); if ((code & 0x20) != 0) UNW_DEC_COPY_STATE(B1, label, arg); else UNW_DEC_LABEL_STATE(B1, label, arg); return dp; } static unsigned char * unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg) { unw_word t; t = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg); return dp; } static unsigned char * unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) { unw_word t, ecount, label; if ((code & 0x10) == 0) { t = unw_decode_uleb128 (&dp); ecount = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B3, t, ecount, arg); } else if ((code & 0x07) == 0) { label = unw_decode_uleb128 (&dp); if 
((code & 0x08) != 0) UNW_DEC_COPY_STATE(B4, label, arg); else UNW_DEC_LABEL_STATE(B4, label, arg); } else switch (code & 0x7) { case 1: return unw_decode_x1 (dp, code, arg); case 2: return unw_decode_x2 (dp, code, arg); case 3: return unw_decode_x3 (dp, code, arg); case 4: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } return dp; } typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *); static unw_decoder unw_decode_table[2][8] = { /* prologue table: */ { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_p1, /* 4 */ unw_decode_p2_p5, unw_decode_p6, unw_decode_p7_p10 }, { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_b1, /* 4 */ unw_decode_b1, unw_decode_b2, unw_decode_b3_x4 } }; /* * Decode one descriptor and return address of next descriptor. */ static inline unsigned char * unw_decode (unsigned char *dp, int inside_body, void *arg) { unw_decoder decoder; unsigned char code; code = *dp++; decoder = unw_decode_table[inside_body][code >> 5]; dp = (*decoder) (dp, code, arg); return dp; } crash-7.1.4/ia64.c0000775000000000000000000037264212634305150012252 0ustar rootroot/* ia64.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifdef IA64 #include "defs.h" #include "xen_hyper_defs.h" #include static int ia64_verify_symbol(const char *, ulong, char); static int ia64_eframe_search(struct bt_info *); static void ia64_back_trace_cmd(struct bt_info *); static void ia64_old_unwind(struct bt_info *); static void ia64_old_unwind_init(void); static void try_old_unwind(struct bt_info *); static void ia64_dump_irq(int); static ulong ia64_processor_speed(void); static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ia64_get_task_pgd(ulong); static ulong ia64_get_pc(struct bt_info *); static ulong ia64_get_sp(struct bt_info *); static ulong ia64_get_thread_ksp(ulong); static void ia64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ia64_translate_pte(ulong, void *, ulonglong); static ulong ia64_vmalloc_start(void); static int ia64_is_task_addr(ulong); static int ia64_dis_filter(ulong, char *, unsigned int); static void ia64_dump_switch_stack(ulong, ulong); static void ia64_cmd_mach(void); static int ia64_get_smp_cpus(void); static void ia64_display_machine_stats(void); static void ia64_display_cpu_data(unsigned int); static void ia64_display_memmap(void); static void ia64_create_memmap(void); static ulong check_mem_limit(void); static int ia64_verify_paddr(uint64_t); static int ia64_available_memory(struct efi_memory_desc_t *); static void ia64_post_init(void); static ulong ia64_in_per_cpu_mca_stack(void); static struct line_number_hook ia64_line_number_hooks[]; static ulong ia64_get_stackbase(ulong); static ulong ia64_get_stacktop(ulong); static void parse_cmdline_args(void); static void ia64_calc_phys_start(void); static int ia64_get_kvaddr_ranges(struct vaddr_range *); struct unw_frame_info; static void dump_unw_frame_info(struct 
unw_frame_info *);
static int old_unw_unwind(struct unw_frame_info *);
static void unw_init_from_blocked_task(struct unw_frame_info *, ulong);
static ulong ia64_rse_slot_num(ulong *);
static ulong *ia64_rse_skip_regs(ulong *, long);
static ulong *ia64_rse_rnat_addr(ulong *);
static ulong rse_read_reg(struct unw_frame_info *, int, int *);
static void rse_function_params(struct unw_frame_info *, char *);
static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int);
static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int);
static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *);
static int ia64_xendump_p2m_create(struct xendump_data *);
static void ia64_debug_dump_page(FILE *, char *, char *);
static char *ia64_xendump_load_page(ulong, struct xendump_data *);
static int ia64_xendump_page_index(ulong, struct xendump_data *);
static ulong ia64_xendump_panic_task(struct xendump_data *);
static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *,
	ulong *, ulong *);
static void ia64_init_hyper(int);

/* ia64-private state, published through machdep->machspec below. */
struct machine_specific ia64_machine_specific = { 0 };

/*
 * Machine-dependent initialization, called repeatedly by the framework
 * with an increasing "when" phase: SETUP_ENV before anything else,
 * PRE_SYMTAB before the symbol table is read, PRE_GDB/POST_GDB around
 * the embedded gdb startup, POST_INIT last.  LOG_ONLY is a minimal
 * path used when only the kernel log is being extracted.
 */
void
ia64_init(int when)
{
	struct syment *sp, *spn;

	/* A Xen hypervisor session uses its own initialization path. */
	if (XEN_HYPER_MODE()) {
		ia64_init_hyper(when);
		return;
	}

	switch (when)
	{
	case SETUP_ENV:
		/*
		 * Suppress kernel console noise from FP emulation and
		 * unaligned accesses performed by this process (ia64-only
		 * prctl operations; compiled in only if the headers
		 * define them).
		 */
#if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT)
		prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0);
#endif
#if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT)
		prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0);
#endif
		break;

	case PRE_SYMTAB:
		machdep->verify_symbol = ia64_verify_symbol;
		machdep->machspec = &ia64_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~(machdep->pageoffset);
		/*
		 * Kernel stack size scales inversely with page size so
		 * that it stays 32KB for all supported page sizes.
		 */
		switch (machdep->pagesize)
		{
		case 4096:
			machdep->stacksize = (power(2, 3) * PAGESIZE());
			break;
		case 8192:
			machdep->stacksize = (power(2, 2) * PAGESIZE());
			break;
		case 16384:
			machdep->stacksize = (power(2, 1) * PAGESIZE());
			break;
		case 65536:
			machdep->stacksize = (power(2, 0) * PAGESIZE());
			break;
		default:
			machdep->stacksize = 32*1024;
			break;
		}
		/* One-page caches for each page table level. */
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pud space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pud_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = ia64_verify_paddr;
		machdep->get_kvaddr_ranges = ia64_get_kvaddr_ranges;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		machdep->machspec->phys_start = UNKNOWN_PHYS_START;
		/* --machdep options may override the defaults above. */
		if (machdep->cmdline_args[0])
			parse_cmdline_args();
		if (ACTIVE())
			machdep->flags |= DEVMEMRD;
		break;

	case PRE_GDB:
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		/*
		 * Until the kernel core dump and va_server library code
		 * do the right thing with respect to the configured page size,
		 * try to recognize a fatal inequity between the compiled-in
		 * page size and the page size used by the kernel.
		 */
		if ((sp = symbol_search("empty_zero_page")) &&
		    (spn = next_symbol(NULL, sp)) &&
		    ((spn->value - sp->value) != PAGESIZE()))
			error(FATAL,
			    "compiled-in page size: %d (apparent) kernel page size: %ld\n",
				PAGESIZE(), spn->value - sp->value);

		machdep->kvbase = KERNEL_VMALLOC_BASE;
		machdep->identity_map_base = KERNEL_CACHED_BASE;
		machdep->is_kvaddr = generic_is_kvaddr;
		machdep->is_uvaddr = generic_is_uvaddr;
		machdep->eframe_search = ia64_eframe_search;
		machdep->back_trace = ia64_back_trace_cmd;
		machdep->processor_speed = ia64_processor_speed;
		machdep->uvtop = ia64_uvtop;
		machdep->kvtop = ia64_kvtop;
		machdep->get_task_pgd = ia64_get_task_pgd;
		machdep->dump_irq = ia64_dump_irq;
		machdep->get_stack_frame = ia64_get_stack_frame;
		machdep->get_stackbase = ia64_get_stackbase;
		machdep->get_stacktop = ia64_get_stacktop;
		machdep->translate_pte = ia64_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->vmalloc_start = ia64_vmalloc_start;
		machdep->is_task_addr = ia64_is_task_addr;
		machdep->dis_filter = ia64_dis_filter;
		machdep->cmd_mach = ia64_cmd_mach;
		machdep->get_smp_cpus = ia64_get_smp_cpus;
		machdep->line_number_hooks = ia64_line_number_hooks;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = NULL;
		machdep->get_irq_affinity = generic_get_irq_affinity;
		machdep->show_interrupts = generic_show_interrupts;

		/*
		 * Determine which virtual region the kernel text lives in;
		 * fall back to the cached region if _stext is unavailable.
		 */
		if ((sp = symbol_search("_stext"))) {
			machdep->machspec->kernel_region =
				VADDR_REGION(sp->value);
			machdep->machspec->kernel_start = sp->value;
		} else {
			machdep->machspec->kernel_region =
				KERNEL_CACHED_REGION;
			machdep->machspec->kernel_start = KERNEL_CACHED_BASE;
		}
		if (machdep->machspec->kernel_region ==
		    KERNEL_VMALLOC_REGION) {
			/*
			 * Relocatable kernel in region 5: vmalloc space
			 * starts 4GB above the kernel, and the physical
			 * load address must be computed.
			 */
			machdep->machspec->vmalloc_start =
				machdep->machspec->kernel_start +
				GIGABYTES((ulong)(4));
			if (machdep->machspec->phys_start ==
			    UNKNOWN_PHYS_START)
				ia64_calc_phys_start();
		} else
			machdep->machspec->vmalloc_start =
				KERNEL_VMALLOC_BASE;
		machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create;
		machdep->xendump_p2m_create = ia64_xendump_p2m_create;
		machdep->xendump_panic_task = ia64_xendump_panic_task;
		machdep->get_xendump_regs = ia64_get_xendump_regs;
		break;

	case POST_GDB:
		/* Sizes/offsets now resolvable via the kernel's debuginfo. */
		STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64");
		STRUCT_SIZE_INIT(switch_stack, "switch_stack");
		MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph");
		MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0");
		MEMBER_OFFSET_INIT(switch_stack_ar_bspstore,
			"switch_stack", "ar_bspstore");
		MEMBER_OFFSET_INIT(switch_stack_ar_pfs,
			"switch_stack", "ar_pfs");
		MEMBER_OFFSET_INIT(switch_stack_ar_rnat,
			"switch_stack", "ar_rnat");
		MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr");
		MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq,
			"cpuinfo_ia64", "proc_freq");
		MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_va_mask,
			"cpuinfo_ia64", "unimpl_va_mask");
		MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_pa_mask,
			"cpuinfo_ia64", "unimpl_pa_mask");
		/* IRQ count: newest kernels export nr_irqs directly. */
		if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
				&machdep->nr_irqs);
		else if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				"irq_desc", NULL, 0);
		else if (symbol_exists("_irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				"_irq_desc", NULL, 0);
		if (!machdep->hz)
			machdep->hz = 1024;
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
		ia64_create_memmap();
		break;

	case POST_INIT:
		ia64_post_init();
		break;

	case LOG_ONLY:
		/* Minimal setup from vmcoreinfo for log extraction only. */
		machdep->machspec = &ia64_machine_specific;
		machdep->machspec->kernel_start = kt->vmcoreinfo._stext_SYMBOL;
		machdep->machspec->kernel_region =
			VADDR_REGION(kt->vmcoreinfo._stext_SYMBOL);
		if (machdep->machspec->kernel_region ==
		    KERNEL_VMALLOC_REGION) {
			machdep->machspec->vmalloc_start =
				machdep->machspec->kernel_start +
				GIGABYTES((ulong)(4));
			ia64_calc_phys_start();
		}
		break;
	}
}

/*
 * --machdep defaults to the physical start location.
 *
 * Otherwise, it's got to be a "item=value" string, separated
 * by commas if more than one is passed in.
*/ void parse_cmdline_args(void) { int index, i, c, errflag; char *p; char buf[BUFSIZE]; char *arglist[MAXARGS]; ulong value; struct machine_specific *ms; int vm_flag; ms = &ia64_machine_specific; vm_flag = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { errflag = 0; value = htol(machdep->cmdline_args[index], RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); } else error(WARNING, "ignoring --machdep option: %s\n\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { errflag = 0; if (STRNEQ(arglist[i], "phys_start=")) { p = arglist[i] + strlen("phys_start="); if (strlen(p)) { value = htol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); continue; } } } else if (STRNEQ(arglist[i], "init_stack_size=")) { p = arglist[i] + strlen("init_stack_size="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->ia64_init_stack_size = (int)value; error(NOTE, "setting init_stack_size to: 0x%x (%d)\n", ms->ia64_init_stack_size, ms->ia64_init_stack_size); continue; } } } else if (STRNEQ(arglist[i], "vm=")) { vm_flag++; p = arglist[i] + strlen("vm="); if (strlen(p)) { if (STREQ(p, "4l")) { machdep->flags |= VM_4_LEVEL; continue; } } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } if (vm_flag) { switch (machdep->flags & (VM_4_LEVEL)) { case VM_4_LEVEL: error(NOTE, "using 4-level pagetable\n"); c++; break; default: error(WARNING, "invalid vm= option\n"); c++; machdep->flags &= ~(VM_4_LEVEL); break; } } if (c) fprintf(fp, "\n"); } } int ia64_in_init_stack(ulong addr) { ulong init_stack_addr; if 
(!symbol_exists("ia64_init_stack")) return FALSE; /* * ia64_init_stack could be aliased to region 5 */ init_stack_addr = ia64_VTOP(symbol_value("ia64_init_stack")); addr = ia64_VTOP(addr); if ((addr < init_stack_addr) || (addr >= (init_stack_addr+machdep->machspec->ia64_init_stack_size))) return FALSE; return TRUE; } static ulong ia64_in_per_cpu_mca_stack(void) { int plen, i; ulong flag; ulong vaddr, paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct task_context *tc; tc = CURRENT_CONTEXT(); if (STRNEQ(CURRENT_COMM(), "INIT")) flag = INIT; else if (STRNEQ(CURRENT_COMM(), "MCA")) flag = MCA; else return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < kt->cpus)) return 0; vaddr = SWITCH_STACK_ADDR(CURRENT_TASK()); if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION) return 0; paddr = ia64_VTOP(vaddr); __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if (CRASHDEBUG(1)) { for (i = 0; i < kt->cpus; i++) { fprintf(fp, "__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[tc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return flag; else return 0; } void ia64_dump_machdep_table(ulong arg) { int i, others, verbose; struct machine_specific *ms; verbose = FALSE; ms = &ia64_machine_specific; if (arg) { switch (arg) { default: case 1: verbose = TRUE; break; case 2: if (machdep->flags & NEW_UNWIND) { machdep->flags &= ~(NEW_UNWIND|NEW_UNW_V1|NEW_UNW_V2|NEW_UNW_V3); machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; ms->dump_unwind_stats = NULL; ms->unwind_debug = NULL; } else { machdep->flags &= ~OLD_UNWIND; machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", 
"ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } ms->unwind_init(); return; case 3: if (machdep->flags & NEW_UNWIND) ms->unwind_debug(arg); return; } } others = 0; fprintf(fp, " flags: %lx (", machdep->flags); /* future flags tests here */ if (machdep->flags & NEW_UNWIND) fprintf(fp, "%sNEW_UNWIND", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V1) fprintf(fp, "%sNEW_UNW_V1", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V2) fprintf(fp, "%sNEW_UNW_V2", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V3) fprintf(fp, "%sNEW_UNW_V3", others++ ? "|" : ""); if (machdep->flags & OLD_UNWIND) fprintf(fp, "%sOLD_UNWIND", others++ ? "|" : ""); if (machdep->flags & UNW_OUT_OF_SYNC) fprintf(fp, "%sUNW_OUT_OF_SYNC", others++ ? "|" : ""); if (machdep->flags & UNW_READ) fprintf(fp, "%sUNW_READ", others++ ? "|" : ""); if (machdep->flags & UNW_PTREGS) fprintf(fp, "%sUNW_PTREGS", others++ ? "|" : ""); if (machdep->flags & UNW_R0) fprintf(fp, "%sUNW_R0", others++ ? "|" : ""); if (machdep->flags & MEM_LIMIT) fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : ""); if (machdep->flags & DEVMEMRD) fprintf(fp, "%sDEVMEMRD", others++ ? "|" : ""); if (machdep->flags & INIT) fprintf(fp, "%sINIT", others++ ? "|" : ""); if (machdep->flags & MCA) fprintf(fp, "%sMCA", others++ ? "|" : ""); if (machdep->flags & VM_4_LEVEL) fprintf(fp, "%sVM_4_LEVEL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %d\n", machdep->hz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ia64_eframe_search()\n"); fprintf(fp, " back_trace: ia64_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: ia64_processor_speed()\n"); fprintf(fp, " uvtop: ia64_uvtop()\n"); fprintf(fp, " kvtop: ia64_kvtop()\n"); fprintf(fp, " get_task_pgd: ia64_get_task_pgd()\n"); fprintf(fp, " dump_irq: ia64_dump_irq()\n"); fprintf(fp, " get_stack_frame: ia64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: ia64_get_stackbase()\n"); fprintf(fp, " get_stacktop: ia64_get_stacktop()\n"); fprintf(fp, " translate_pte: ia64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ia64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ia64_is_task_addr()\n"); fprintf(fp, " verify_symbol: ia64_verify_symbol()\n"); fprintf(fp, " dis_filter: ia64_dis_filter()\n"); fprintf(fp, " cmd_mach: ia64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ia64_get_smp_cpus()\n"); fprintf(fp, " get_kvaddr_ranges: ia64_get_kvaddr_ranges()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: %s()\n", (machdep->verify_paddr == ia64_verify_paddr) ? 
"ia64_verify_paddr" : "generic_verify_paddr"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n"); fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n"); fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n"); fprintf(fp, " get_xendump_regs: ia64_get_xendump_regs()\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: ia64_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: ia64_machine_specific\n"); fprintf(fp, " cpu_data_address: %lx\n", machdep->machspec->cpu_data_address); fprintf(fp, " unimpl_va_mask: %lx\n", machdep->machspec->unimpl_va_mask); fprintf(fp, " unimpl_pa_mask: %lx\n", machdep->machspec->unimpl_pa_mask); fprintf(fp, " unw: %lx\n", (ulong)machdep->machspec->unw); fprintf(fp, " unw_tables_offset: %ld\n", machdep->machspec->unw_tables_offset); fprintf(fp, " unw_kernel_table_offset: %ld %s\n", machdep->machspec->unw_kernel_table_offset, machdep->machspec->unw_kernel_table_offset ? "" : "(unused)"); fprintf(fp, " unw_pt_regs_offsets: %ld %s\n", machdep->machspec->unw_pt_regs_offsets, machdep->machspec->unw_pt_regs_offsets ? "" : "(unused)"); fprintf(fp, " script_index: %d\n", machdep->machspec->script_index); fprintf(fp, " script_cache: %lx%s", (ulong)machdep->machspec->script_cache, machdep->flags & OLD_UNWIND ? 
"\n" : " "); if (machdep->flags & NEW_UNWIND) ms->dump_unwind_stats(); if (!(machdep->flags & (NEW_UNWIND|OLD_UNWIND))) fprintf(fp, "\n"); fprintf(fp, " mem_limit: %lx\n", machdep->machspec->mem_limit); fprintf(fp, " kernel_region: %ld\n", machdep->machspec->kernel_region); fprintf(fp, " kernel_start: %lx\n", machdep->machspec->kernel_start); fprintf(fp, " phys_start: %lx (%lx)\n", machdep->machspec->phys_start, machdep->machspec->phys_start & KERNEL_TR_PAGE_MASK); fprintf(fp, " vmalloc_start: %lx\n", machdep->machspec->vmalloc_start); fprintf(fp, " ia64_memmap: %lx\n", (ulong)machdep->machspec->ia64_memmap); fprintf(fp, " efi_memmap_size: %ld\n", (ulong)machdep->machspec->efi_memmap_size); fprintf(fp, " efi_memdesc_size: %ld\n", (ulong)machdep->machspec->efi_memdesc_size); fprintf(fp, " unwind_init: "); if (ms->unwind_init == unwind_init_v1) fprintf(fp, "unwind_init_v1()\n"); else if (ms->unwind_init == unwind_init_v2) fprintf(fp, "unwind_init_v2()\n"); else if (ms->unwind_init == unwind_init_v3) fprintf(fp, "unwind_init_v3()\n"); else if (ms->unwind_init == ia64_old_unwind_init) fprintf(fp, "ia64_old_unwind_init()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_init); fprintf(fp, " unwind: "); if (ms->unwind == unwind_v1) fprintf(fp, "unwind_v1()\n"); else if (ms->unwind == unwind_v2) fprintf(fp, "unwind_v2()\n"); else if (ms->unwind == unwind_v3) fprintf(fp, "unwind_v3()\n"); else if (ms->unwind == ia64_old_unwind) fprintf(fp, "ia64_old_unwind()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind); fprintf(fp, " dump_unwind_stats: "); if (ms->dump_unwind_stats == dump_unwind_stats_v1) fprintf(fp, "dump_unwind_stats_v1()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v2) fprintf(fp, "dump_unwind_stats_v2()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v3) fprintf(fp, "dump_unwind_stats_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->dump_unwind_stats); fprintf(fp, " unwind_debug: "); if (ms->unwind_debug == unwind_debug_v1) fprintf(fp, 
"unwind_debug_v1()\n"); else if (ms->unwind_debug == unwind_debug_v2) fprintf(fp, "unwind_debug_v2()\n"); else if (ms->unwind_debug == unwind_debug_v3) fprintf(fp, "unwind_debug_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_debug); fprintf(fp, " ia64_init_stack_size: %d\n", ms->ia64_init_stack_size); if (verbose) ia64_display_memmap(); } /* * Keep or reject a symbol from the namelist. */ static int ia64_verify_symbol(const char *name, ulong value, char type) { ulong region; if (!name || !strlen(name)) return FALSE; if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (CRASHDEBUG(8)) fprintf(fp, "%016lx %s\n", value, name); // if (STREQ(name, "phys_start") && type == 'A') // if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) // machdep->machspec->phys_start = value; region = VADDR_REGION(value); return (((region == KERNEL_CACHED_REGION) || (region == KERNEL_VMALLOC_REGION))); } /* * Look for likely exception frames in a stack. */ static int ia64_eframe_search(struct bt_info *bt) { return(error(FATAL, "ia64_eframe_search: not available for this architecture\n")); } /* * Unroll a kernel stack. */ #define BT_SWITCH_STACK BT_SYMBOLIC_ARGS static void ia64_back_trace_cmd(struct bt_info *bt) { struct machine_specific *ms = &ia64_machine_specific; if (bt->flags & BT_SWITCH_STACK) ia64_dump_switch_stack(bt->task, 0); if (machdep->flags & UNW_OUT_OF_SYNC) error(FATAL, "kernel and %s unwind data structures are out of sync\n", pc->program_name); ms->unwind(bt); if (bt->flags & BT_UNWIND_ERROR) try_old_unwind(bt); } /* * Dump the IRQ table. */ static void ia64_dump_irq(int irq) { if (symbol_exists("irq_desc") || symbol_exists("_irq_desc") || kernel_symbol_exists("irq_desc_ptrs")) { machdep->dump_irq = generic_dump_irq; return(generic_dump_irq(irq)); } error(FATAL, "ia64_dump_irq: neither irq_desc or _irq_desc exist\n"); } /* * Calculate and return the speed of the processor. 
*/
static ulong
ia64_processor_speed(void)
{
	ulong mhz, proc_freq;
	int bootstrap_processor;

	/* Cached after the first successful computation. */
	if (machdep->mhz)
		return(machdep->mhz);

	mhz = 0;
	bootstrap_processor = 0;

	/* Without per-cpu data or a proc_freq member, report 0 (cached). */
	if (!machdep->machspec->cpu_data_address ||
	    !VALID_STRUCT(cpuinfo_ia64) ||
	    !VALID_MEMBER(cpuinfo_ia64_proc_freq))
		return (machdep->mhz = mhz);

	if (symbol_exists("bootstrap_processor"))
		get_symbol_data("bootstrap_processor", sizeof(int),
			&bootstrap_processor);
	/* -1 means not yet determined by the kernel; treat as cpu 0. */
	if (bootstrap_processor == -1)
		bootstrap_processor = 0;

	readmem(machdep->machspec->cpu_data_address +
		OFFSET(cpuinfo_ia64_proc_freq),
		KVADDR, &proc_freq, sizeof(ulong),
		"cpuinfo_ia64 proc_freq", FAULT_ON_ERROR);

	mhz = proc_freq/1000000;

	return (machdep->mhz = mhz);
}

/* Generic abstraction to translate user or kernel virtual
 * addresses to physical using a 4 level page table.
 */
static int
ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
{
	ulong *page_dir;
	ulong *page_upper;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pud_pte;
	ulong pmd_pte;
	ulong pte;
	ulong region, offset;

	if (usr) {
		/*
		 * User pgd index folds the 3-bit virtual region number
		 * into the top of the offset (ia64 mixed region/pgd
		 * indexing).
		 */
		region = VADDR_REGION(vaddr);
		offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
		offset |= (region << (PAGESHIFT() - 6));
		page_dir = pgd + offset;
	} else {
		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
			error(FATAL, "cannot determine kernel pgd pointer\n");
		page_dir = pgd + ((vaddr >> PGDIR_SHIFT) &
			(PTRS_PER_PGD - 1));
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/* FILL_* populate the per-level page caches in machdep. */
	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);

	if (!(pgd_pte))
		return FALSE;

	offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
	page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset;

	FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE());
	pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper));

	if (verbose)
		fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte);

	if (!(pud_pte))
		return FALSE;

	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset;

	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

	if (!(pmd_pte))
		return FALSE;

	offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
	page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;

	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

	/* Not present and not PROT_NONE: possibly a swap entry. */
	if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) {
		if (usr)
			*paddr = pte;
		if (pte && verbose) {
			fprintf(fp, "\n");
			ia64_translate_pte(pte, 0, 0);
		}
		return FALSE;
	}

	*paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
		ia64_translate_pte(pte, 0, 0);
	}

	return TRUE;
}

/* Generic abstraction to translate user or kernel virtual
 * addresses to physical using a 3 level page table.
*/ static int ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
* * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * swapper_pg_dir, making it irrelevant in this processor's case. */ static int ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return ia64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1); } else { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop(uvaddr, paddr, pgd, verbose, 1); } } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
*/ static int ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!IS_KVADDR(kvaddr)) return FALSE; if (!vt->vmalloc_start) { *paddr = ia64_VTOP(kvaddr); return TRUE; } switch (VADDR_REGION(kvaddr)) { case KERNEL_UNCACHED_REGION: *paddr = kvaddr - KERNEL_UNCACHED_BASE; if (verbose) fprintf(fp, "[UNCACHED MEMORY]\n"); return TRUE; case KERNEL_CACHED_REGION: *paddr = ia64_VTOP(kvaddr); if (verbose) fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n"); return TRUE; case KERNEL_VMALLOC_REGION: if (ia64_IS_VMALLOC_ADDR(kvaddr)) break; if ((kvaddr < machdep->machspec->kernel_start) && (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION)) { *paddr = PADDR_NOT_AVAILABLE; return FALSE; } *paddr = ia64_VTOP(kvaddr); if (verbose) fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n"); return TRUE; } if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0); else return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0); } else { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0); else return ia64_vtop(kvaddr, paddr, pgd, verbose, 0); } } /* * Even though thread_info structs are used in 2.6, they * are not the stack base. (until further notice...) */ static ulong ia64_get_stackbase(ulong task) { return (task); } static ulong ia64_get_stacktop(ulong task) { return (ia64_get_stackbase(task) + STACKSIZE()); } /* * Get the relevant page directory pointer from a task structure. */ static ulong ia64_get_task_pgd(ulong task) { return (error(FATAL, "ia64_get_task_pgd: N/A\n")); } static void ia64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (pcp) *pcp = ia64_get_pc(bt); if (spp) *spp = ia64_get_sp(bt); } /* * Return the kernel switch_stack b0 value. 
*/ static ulong ia64_get_pc(struct bt_info *bt) { ulong b0; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_b0), KVADDR, &b0, sizeof(void *), "switch_stack b0", FAULT_ON_ERROR); return b0; } /* * Return the kernel switch_stack ar_bspstore value. * If it's "bt -t" request, calculate the register backing store offset. */ static ulong ia64_get_sp(struct bt_info *bt) { ulong bspstore; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_ar_bspstore), KVADDR, &bspstore, sizeof(void *), "switch_stack ar_bspstore", FAULT_ON_ERROR); if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { bspstore = bt->task + SIZE(task_struct); if (tt->flags & THREAD_INFO) bspstore += SIZE(thread_info); bspstore = roundup(bspstore, sizeof(ulong)); } return bspstore; } /* * Get the ksp out of the task's thread_struct */ static ulong ia64_get_thread_ksp(ulong task) { ulong ksp; if (XEN_HYPER_MODE()) { readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR, &ksp, sizeof(void *), "vcpu thread ksp", FAULT_ON_ERROR); } else { readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, &ksp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } return ksp; } /* * Return the switch_stack structure address of a task. */ ulong ia64_get_switch_stack(ulong task) { ulong sw; if (LKCD_DUMPFILE() && (sw = get_lkcd_switch_stack(task))) return sw; /* * debug only: get panic switch_stack from the ELF header. */ if (CRASHDEBUG(3) && NETDUMP_DUMPFILE() && (sw = get_netdump_switch_stack(task))) return sw; if (DISKDUMP_DUMPFILE() && (sw = get_diskdump_switch_stack(task))) return sw; return (ia64_get_thread_ksp((ulong)(task)) + 16); } /* * Translate a PTE, returning TRUE if the page is _PAGE_P. * If a physaddr pointer is passed in, don't print anything. 
*/
static int
ia64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	int c, len1, len2, len3, others, page_present;
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	char *ptr;
	ulong paddr;

	paddr = pte & _PFN_MASK;
	page_present = !!(pte & (_PAGE_P | _PAGE_PROTNONE));

	/* Quiet mode: just return the physical address and presence. */
	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE"));

	/* A non-zero, non-present pte is interpreted as a swap entry. */
	if (!page_present && pte) {
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));

	fprintf(fp, "(");
	others = 0;

	if (pte) {
		if (pte & _PAGE_P)
			fprintf(fp, "%sP", others++ ? "|" : "");
		/* Memory attribute field. */
		switch (pte & _PAGE_MA_MASK)
		{
		case _PAGE_MA_WB:
			ptr = "MA_WB";
			break;
		case _PAGE_MA_UC:
			ptr = "MA_UC";
			break;
		case _PAGE_MA_UCE:
			ptr = "MA_UCE";
			break;
		case _PAGE_MA_WC:
			ptr = "MA_WC";
			break;
		case _PAGE_MA_NAT:
			ptr = "MA_NAT";
			break;
		case (0x1 << 2):
			ptr = "MA_UC";
			break;
		default:
			ptr = "MA_RSV";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		/* Privilege level field. */
		switch (pte & _PAGE_PL_MASK)
		{
		case _PAGE_PL_0:
			ptr = "PL_0";
			break;
		case _PAGE_PL_1:
			ptr = "PL_1";
			break;
		case _PAGE_PL_2:
			ptr = "PL_2";
			break;
		case _PAGE_PL_3:
			ptr = "PL_3";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		/* Access rights field. */
		switch (pte & _PAGE_AR_MASK)
		{
		case _PAGE_AR_R:
			ptr = "AR_R";
			break;
		case _PAGE_AR_RX:
			/*
			 * NOTE(review): "AT_RX" looks like a typo for
			 * "AR_RX", but it is user-visible output --
			 * confirm against the project's expected output
			 * before changing.
			 */
			ptr = "AT_RX";
			break;
		case _PAGE_AR_RW:
			ptr = "AR_RW";
			break;
		case _PAGE_AR_RWX:
			ptr = "AR_RWX";
			break;
		case _PAGE_AR_R_RW:
			ptr = "AR_R_RW";
			break;
		case _PAGE_AR_RX_RWX:
			ptr = "AR_RX_RWX";
			break;
		case _PAGE_AR_RWX_RW:
			ptr = "AR_RWX_RW";
			break;
		case _PAGE_AR_X_RX:
			ptr = "AR_X_RX";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		if (pte & _PAGE_A)
			fprintf(fp, "%sA", others++ ? "|" : "");
		if (pte & _PAGE_D)
			fprintf(fp, "%sD", others++ ? "|" : "");
		if (pte & _PAGE_ED)
			fprintf(fp, "%sED", others++ ? "|" : "");
		if (pte & _PAGE_PROTNONE)
			fprintf(fp, "%sPROTNONE", others++ ? "|" : "");
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return page_present;
}

/*
 *  Determine where vmalloc'd memory starts.
 */
static ulong
ia64_vmalloc_start(void)
{
	return machdep->machspec->vmalloc_start;
}

/*
 *  Verify that an address is a task_struct address.
 */
static int
ia64_is_task_addr(ulong task)
{
	int i;

	/* Task structs are stack-size aligned kernel addresses... */
	if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0))
		return TRUE;

	/* ...or one of the known per-cpu idle threads. */
	for (i = 0; i < kt->cpus; i++)
		if (task == tt->idle_threads[i])
			return TRUE;

	return FALSE;
}

/*
 *  Filter disassembly output if the output radix is not gdb's default 10
 */
static int
ia64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1, *p2;
	int argc;
	int revise_bracket, stop_bit;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;
	/*
	 *  For some reason gdb can go off into the weeds translating
	 *  text addresses, (on alpha -- not necessarily seen on ia64)
	 *  so this routine both fixes the references as well as imposing
	 *  the current output radix on the translations.
	 */
	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	/* Rewrite the "address:" prefix with a symbolic form. */
	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	/* A trailing "<sym>" (optionally followed by ";;") is re-derived. */
	revise_bracket = stop_bit = 0;
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		revise_bracket = TRUE;
		stop_bit = FALSE;
	} else if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    strstr(argv[argc-1], ">;;")) {
		revise_bracket = TRUE;
		stop_bit = TRUE;
	}

	if (revise_bracket) {
		/* Back up from '<' to the "0x" address it annotates. */
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && !STRNEQ(p1, "0x"))
			p1--;

		if (!STRNEQ(p1, "0x"))
			return FALSE;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>%s\n", value,
			value_to_symstr(value, buf2, output_radix),
			stop_bit ? ";;" : "");

		/* In-place rewrite of the tail of inbuf. */
		sprintf(p1, "%s", buf1);

	} else if (STRNEQ(argv[argc-2], "br.call.") &&
	    STRNEQ(argv[argc-1], "b0=0x")) {
		/*
		 *  Update module function calls of these formats:
		 *
		 *   br.call.sptk.many b0=0xa0000000003d5e40;;
		 *   br.call.sptk.many b0=0xa00000000001dfc0
		 *
		 *  to show a bracketed function name if the destination
		 *  address is a known symbol with no offset.
		 */
		if ((p1 = strstr(argv[argc-1], ";;")) &&
		    (p2 = strstr(inbuf, ";;\n"))) {
			*p1 = NULLCHAR;
			p1 = &argv[argc-1][3];
			if (extract_hex(p1, &value, NULLCHAR, TRUE)) {
				sprintf(buf1, " <%s>;;\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		} else {
			p1 = &argv[argc-1][3];
			p2 = &LASTCHAR(inbuf);
			if (extract_hex(p1, &value, '\n', TRUE)) {
				sprintf(buf1, " <%s>\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		}
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 *  Format the pt_regs structure.
 */

/* Index names for the eframe[] snapshot of a pt_regs structure. */
enum pt_reg_names {
    P_cr_ipsr, P_cr_iip, P_cr_ifs,
    P_ar_unat, P_ar_pfs, P_ar_rsc, P_ar_rnat, P_ar_bspstore,
    P_ar_ccv, P_ar_fpsr, P_pr, P_loadrs,
    P_b0, P_b6, P_b7,
    P_r1, P_r2, P_r3, P_r8, P_r9, P_r10, P_r11, P_r12, P_r13,
    P_r14, P_r15, P_r16, P_r17, P_r18, P_r19, P_r20, P_r21, P_r22,
    P_r23, P_r24, P_r25, P_r26, P_r27, P_r28, P_r29, P_r30, P_r31,
    P_f6_lo, P_f6_hi, P_f7_lo, P_f7_hi, P_f8_lo, P_f8_hi,
    P_f9_lo, P_f9_hi, P_f10_lo, P_f10_hi, P_f11_lo, P_f11_hi,
    NUM_PT_REGS};

/*
 * Dump an ia64 exception frame (pt_regs, or cpu_user_regs under the Xen
 * hypervisor) located at kernel address addr.
 *
 * Implementation: dump_struct() renders the structure into a tmpfile,
 * which is then re-parsed line by line; each "name = value" line is
 * matched by substring and folded into the eframe[] array, which is
 * finally pretty-printed.  Floating-point registers span two lines
 * ("f6 = {" then "{lo, hi}"), tracked via the fval state variable.
 */
void
ia64_exception_frame(ulong addr, struct bt_info *bt)
{
    char buf[BUFSIZE], *p, *p1;
    int fval;
    ulong value1, value2;
    ulong eframe[NUM_PT_REGS];

    console("ia64_exception_frame: pt_regs: %lx\n", addr);

    /* silence debug output while parsing the tmpfile */
    if (bt->debug)
        CRASHDEBUG_RESTORE();
    CRASHDEBUG_SUSPEND(0);

    BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS);

    open_tmpfile();
    if (XEN_HYPER_MODE())
        dump_struct("cpu_user_regs", addr, RADIX(16));
    else
        dump_struct("pt_regs", addr, RADIX(16));
    rewind(pc->tmpfile);

    fval = 0;
    while (fgets(buf, BUFSIZE, pc->tmpfile)) {
        /* a float register header: remember which one, values follow */
        if (strstr(buf, "f6 = ")) { fval = 6; continue; }
        if (strstr(buf, "f7 = ")) { fval = 7; continue; }
        if (strstr(buf, "f8 = ")) { fval = 8; continue; }
        if (strstr(buf, "f9 = ")) { fval = 9; continue; }
        if (strstr(buf, "f10 = ")) { fval = 10; continue; }
        if (strstr(buf, "f11 = ")) { fval = 11; continue; }

        if (!strstr(buf, "0x"))
            continue;

        if (fval) {
            /* "{0xLO, 0xHI}" continuation line of a float register */
            p = strstr(buf, "0x");
            if ((p1 = strstr(p, "}")))
                *p1 = NULLCHAR;
            extract_hex(p, &value1, ',', TRUE);
            p = strstr(buf, ",");
            extract_hex(p, &value2, NULLCHAR, FALSE);
            switch (fval)
            {
            case 6:
                eframe[P_f6_lo] = value1;
                eframe[P_f6_hi] = value2;
                break;
            case 7:
                eframe[P_f7_lo] = value1;
                eframe[P_f7_hi] = value2;
                break;
            case 8:
                eframe[P_f8_lo] = value1;
                eframe[P_f8_hi] = value2;
                break;
            case 9:
                eframe[P_f9_lo] = value1;
                eframe[P_f9_hi] = value2;
                break;
            case 10:
                eframe[P_f10_lo] = value1;
                eframe[P_f10_hi] = value2;
                break;
            case 11:
                eframe[P_f11_lo] = value1;
                eframe[P_f11_hi] = value2;
                break;
            }
            fval = 0;
            continue;
        }

        /* ordinary "name = 0xvalue" line */
        strip_comma(clean_line(buf));
        p = strstr(buf, " = ");
        extract_hex(p, &value1, NULLCHAR, FALSE);

        if (strstr(buf, "cr_ipsr = ")) { eframe[P_cr_ipsr] = value1; }
        if (strstr(buf, "cr_iip = ")) { eframe[P_cr_iip] = value1; }
        if (strstr(buf, "cr_ifs = ")) { eframe[P_cr_ifs] = value1; }
        if (strstr(buf, "ar_unat = ")) { eframe[P_ar_unat] = value1; }
        if (strstr(buf, "ar_pfs = ")) { eframe[P_ar_pfs] = value1; }
        if (strstr(buf, "ar_rsc = ")) { eframe[P_ar_rsc] = value1; }
        if (strstr(buf, "ar_rnat = ")) { eframe[P_ar_rnat] = value1; }
        if (strstr(buf, "ar_bspstore = ")) { eframe[P_ar_bspstore] = value1; }
        if (strstr(buf, "ar_ccv = ")) { eframe[P_ar_ccv] = value1; }
        if (strstr(buf, "ar_fpsr = ")) { eframe[P_ar_fpsr] = value1; }
        if (strstr(buf, "pr = ")) { eframe[P_pr] = value1; }
        if (strstr(buf, "loadrs = ")) { eframe[P_loadrs] = value1; }
        if (strstr(buf, "b0 = ")) { eframe[P_b0] = value1; }
        if (strstr(buf, "b6 = ")) { eframe[P_b6] = value1; }
        if (strstr(buf, "b7 = ")) { eframe[P_b7] = value1; }
        if (strstr(buf, "r1 = ")) { eframe[P_r1] = value1; }
        if (strstr(buf, "r2 = ")) { eframe[P_r2] = value1; }
        if (strstr(buf, "r3 = ")) { eframe[P_r3] = value1; }
        if (strstr(buf, "r8 = ")) { eframe[P_r8] = value1; }
        if (strstr(buf, "r9 = ")) { eframe[P_r9] = value1; }
        if (strstr(buf, "r10 = ")) { eframe[P_r10] = value1; }
        if (strstr(buf, "r11 = ")) { eframe[P_r11] = value1; }
        if (strstr(buf, "r12 = ")) { eframe[P_r12] = value1; }
        if (strstr(buf, "r13 = ")) { eframe[P_r13] = value1; }
        if (strstr(buf, "r14 = ")) { eframe[P_r14] = value1; }
        if (strstr(buf, "r15 = ")) { eframe[P_r15] = value1; }
        if (strstr(buf, "r16 = ")) { eframe[P_r16] = value1; }
        if (strstr(buf, "r17 = ")) { eframe[P_r17] = value1; }
        if (strstr(buf, "r18 = ")) { eframe[P_r18] = value1; }
        if (strstr(buf, "r19 = ")) { eframe[P_r19] = value1; }
        if (strstr(buf, "r20 = ")) { eframe[P_r20] = value1; }
        if (strstr(buf, "r21 = ")) { eframe[P_r21] = value1; }
        if (strstr(buf, "r22 = ")) { eframe[P_r22] = value1; }
        if (strstr(buf, "r23 = ")) { eframe[P_r23] = value1; }
        if (strstr(buf, "r24 = ")) { eframe[P_r24] = value1; }
        if (strstr(buf, "r25 = ")) { eframe[P_r25] = value1; }
        if (strstr(buf, "r26 = ")) { eframe[P_r26] = value1; }
        if (strstr(buf, "r27 = ")) { eframe[P_r27] = value1; }
        if (strstr(buf, "r28 = ")) { eframe[P_r28] = value1; }
        if (strstr(buf, "r29 = ")) { eframe[P_r29] = value1; }
        if (strstr(buf, "r30 = ")) { eframe[P_r30] = value1; }
        if (strstr(buf, "r31 = ")) { eframe[P_r31] = value1; }
    }

    close_tmpfile();

    fprintf(fp, " EFRAME: %lx\n", addr);

    if (bt->flags & BT_INCOMPLETE_USER_EFRAME) {
        fprintf(fp,
            " [exception frame incomplete -- check salinfo for complete context]\n");
        bt->flags &= ~BT_INCOMPLETE_USER_EFRAME;
    }

    fprintf(fp, " B0: %016lx CR_IIP: %016lx\n",
        eframe[P_b0], eframe[P_cr_iip]);
/**
 if (is_kernel_text(eframe[P_cr_iip]))
 fprintf(fp, "<%s>",
 value_to_symstr(eframe[P_cr_iip], buf, 0));
 fprintf(fp, "\n");
**/
    fprintf(fp, " CR_IPSR: %016lx CR_IFS: %016lx\n",
        eframe[P_cr_ipsr], eframe[P_cr_ifs]);
    fprintf(fp, " AR_PFS: %016lx AR_RSC: %016lx\n",
        eframe[P_ar_pfs], eframe[P_ar_rsc]);
    fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n",
        eframe[P_ar_unat], eframe[P_ar_rnat]);
    fprintf(fp, " AR_CCV: %016lx AR_FPSR: %016lx\n",
        eframe[P_ar_ccv], eframe[P_ar_fpsr]);
    fprintf(fp, " LOADRS: %016lx AR_BSPSTORE: %016lx\n",
        eframe[P_loadrs], eframe[P_ar_bspstore]);
    fprintf(fp, " B6: %016lx B7: %016lx\n",
        eframe[P_b6], eframe[P_b7]);
    fprintf(fp, " PR: %016lx R1: %016lx\n",
        eframe[P_pr], eframe[P_r1]);
    fprintf(fp, " R2: %016lx R3: %016lx\n",
        eframe[P_r2], eframe[P_r3]);
    fprintf(fp, " R8: %016lx R9: %016lx\n",
        eframe[P_r8], eframe[P_r9]);
    fprintf(fp, " R10: %016lx R11: %016lx\n",
        eframe[P_r10], eframe[P_r11]);
    fprintf(fp, " R12: %016lx R13: %016lx\n",
        eframe[P_r12], eframe[P_r13]);
    fprintf(fp, " R14: %016lx R15: %016lx\n",
        eframe[P_r14], eframe[P_r15]);
    fprintf(fp, " R16: %016lx R17: %016lx\n",
        eframe[P_r16], eframe[P_r17]);
    fprintf(fp, " R18: %016lx R19: %016lx\n",
        eframe[P_r18], eframe[P_r19]);
    fprintf(fp, " R20: %016lx R21: %016lx\n",
        eframe[P_r20], eframe[P_r21]);
    fprintf(fp, " R22: %016lx R23: %016lx\n",
        eframe[P_r22], eframe[P_r23]);
    fprintf(fp, " R24: %016lx R25: %016lx\n",
        eframe[P_r24], eframe[P_r25]);
    fprintf(fp, " R26: %016lx R27: %016lx\n",
        eframe[P_r26], eframe[P_r27]);
    fprintf(fp, " R28: %016lx R29: %016lx\n",
        eframe[P_r28], eframe[P_r29]);
    fprintf(fp, " R30: %016lx R31: %016lx\n",
        eframe[P_r30], eframe[P_r31]);
    fprintf(fp, " F6: %05lx%016lx ",
        eframe[P_f6_hi], eframe[P_f6_lo]);
    fprintf(fp, " F7: %05lx%016lx\n",
        eframe[P_f7_hi], eframe[P_f7_lo]);
    fprintf(fp, " F8: %05lx%016lx ",
        eframe[P_f8_hi], eframe[P_f8_lo]);
    fprintf(fp, " F9: %05lx%016lx\n",
        eframe[P_f9_hi], eframe[P_f9_lo]);
    /* f10/f11 were added to pt_regs in the V3 unwind era */
    if (machdep->flags & NEW_UNW_V3) {
        fprintf(fp, " F10: %05lx%016lx ",
            eframe[P_f10_hi], eframe[P_f10_lo]);
        fprintf(fp, " F11: %05lx%016lx\n",
            eframe[P_f11_hi], eframe[P_f11_lo]);
    }

    /* restore the caller's debug level */
    CRASHDEBUG_RESTORE();
    if (bt->debug)
        CRASHDEBUG_SUSPEND(bt->debug);
}

/* Index names for the ss[] snapshot of a switch_stack structure. */
enum ss_reg_names {
    S_caller_unat, S_ar_fpsr,
    S_f2_lo, S_f2_hi, S_f3_lo, S_f3_hi, S_f4_lo, S_f4_hi,
    S_f5_lo, S_f5_hi, S_f10_lo, S_f10_hi, S_f11_lo, S_f11_hi,
    S_f12_lo, S_f12_hi, S_f13_lo, S_f13_hi, S_f14_lo, S_f14_hi,
    S_f15_lo, S_f15_hi, S_f16_lo, S_f16_hi, S_f17_lo, S_f17_hi,
    S_f18_lo, S_f18_hi, S_f19_lo, S_f19_hi, S_f20_lo, S_f20_hi,
    S_f21_lo, S_f21_hi, S_f22_lo, S_f22_hi, S_f23_lo, S_f23_hi,
    S_f24_lo, S_f24_hi, S_f25_lo, S_f25_hi, S_f26_lo, S_f26_hi,
    S_f27_lo, S_f27_hi, S_f28_lo, S_f28_hi, S_f29_lo, S_f29_hi,
    S_f30_lo, S_f30_hi, S_f31_lo, S_f31_hi,
    S_r4, S_r5, S_r6, S_r7,
    S_b0, S_b1, S_b2, S_b3, S_b4, S_b5,
    S_ar_pfs, S_ar_lc, S_ar_unat, S_ar_rnat, S_ar_bspstore, S_pr,
    NUM_SS_REGS };

/*
 * Format the switch_stack structure.
 */

/*
 * Dump the switch_stack structure of a blocked task.
 *
 *  task: task_struct address; the switch_stack location is derived
 *        via SWITCH_STACK_ADDR().
 *  flag: currently unused.
 *
 * Same technique as ia64_exception_frame(): dump_struct() renders the
 * structure into a tmpfile, "name = value" lines are matched by
 * substring into the ss[] array, and float registers ("fN = {" headers
 * followed by "{lo, hi}" lines) are tracked via fval.
 */
static void
ia64_dump_switch_stack(ulong task, ulong flag)
{
    ulong addr;
    char buf[BUFSIZE], *p;
    int fval;
    ulong value1, value2;
    ulong ss[NUM_SS_REGS];

    addr = SWITCH_STACK_ADDR(task);

    BZERO(&ss, sizeof(ulong) * NUM_SS_REGS);

    open_tmpfile();
    dump_struct("switch_stack", addr, RADIX(16));
    rewind(pc->tmpfile);

    fval = 0;
    while (fgets(buf, BUFSIZE, pc->tmpfile)) {
        /* float register header lines: remember which one */
        if (strstr(buf, "f2 = ")) { fval = 2; continue; }
        if (strstr(buf, "f3 = ")) { fval = 3; continue; }
        if (strstr(buf, "f4 = ")) { fval = 4; continue; }
        if (strstr(buf, "f5 = ")) { fval = 5; continue; }
        if (strstr(buf, "f10 = ")) { fval = 10; continue; }
        if (strstr(buf, "f11 = ")) { fval = 11; continue; }
        if (strstr(buf, "f12 = ")) { fval = 12; continue; }
        if (strstr(buf, "f13 = ")) { fval = 13; continue; }
        if (strstr(buf, "f14 = ")) { fval = 14; continue; }
        if (strstr(buf, "f15 = ")) { fval = 15; continue; }
        if (strstr(buf, "f16 = ")) { fval = 16; continue; }
        if (strstr(buf, "f17 = ")) { fval = 17; continue; }
        if (strstr(buf, "f18 = ")) { fval = 18; continue; }
        if (strstr(buf, "f19 = ")) { fval = 19; continue; }
        if (strstr(buf, "f20 = ")) { fval = 20; continue; }
        if (strstr(buf, "f21 = ")) { fval = 21; continue; }
        if (strstr(buf, "f22 = ")) { fval = 22; continue; }
        if (strstr(buf, "f23 = ")) { fval = 23; continue; }
        if (strstr(buf, "f24 = ")) { fval = 24; continue; }
        if (strstr(buf, "f25 = ")) { fval = 25; continue; }
        if (strstr(buf, "f26 = ")) { fval = 26; continue; }
        if (strstr(buf, "f27 = ")) { fval = 27; continue; }
        if (strstr(buf, "f28 = ")) { fval = 28; continue; }
        if (strstr(buf, "f29 = ")) { fval = 29; continue; }
        if (strstr(buf, "f30 = ")) { fval = 30; continue; }
        if (strstr(buf, "f31 = ")) { fval = 31; continue; }

        if (!strstr(buf, "0x"))
            continue;

        if (fval) {
            /* "{0xLO, 0xHI}" continuation line of a float register */
            p = strstr(buf, "0x");
            extract_hex(p, &value1, ',', TRUE);
            p = strstr(buf, ",");
            extract_hex(p, &value2, '}', FALSE);
            switch (fval)
            {
            case 2:
                ss[S_f2_lo] = value1;
                ss[S_f2_hi] = value2;
                break;
            case 3:
                ss[S_f3_lo] = value1;
                ss[S_f3_hi] = value2;
                break;
            case 4:
                ss[S_f4_lo] = value1;
                ss[S_f4_hi] = value2;
                break;
            case 5:
                ss[S_f5_lo] = value1;
                ss[S_f5_hi] = value2;
                break;
            case 10:
                ss[S_f10_lo] = value1;
                ss[S_f10_hi] = value2;
                break;
            case 11:
                ss[S_f11_lo] = value1;
                ss[S_f11_hi] = value2;
                break;
            case 12:
                ss[S_f12_lo] = value1;
                ss[S_f12_hi] = value2;
                break;
            case 13:
                ss[S_f13_lo] = value1;
                ss[S_f13_hi] = value2;
                break;
            case 14:
                ss[S_f14_lo] = value1;
                ss[S_f14_hi] = value2;
                break;
            case 15:
                ss[S_f15_lo] = value1;
                ss[S_f15_hi] = value2;
                break;
            case 16:
                ss[S_f16_lo] = value1;
                ss[S_f16_hi] = value2;
                break;
            case 17:
                ss[S_f17_lo] = value1;
                ss[S_f17_hi] = value2;
                break;
            case 18:
                ss[S_f18_lo] = value1;
                ss[S_f18_hi] = value2;
                break;
            case 19:
                ss[S_f19_lo] = value1;
                ss[S_f19_hi] = value2;
                break;
            case 20:
                ss[S_f20_lo] = value1;
                ss[S_f20_hi] = value2;
                break;
            case 21:
                ss[S_f21_lo] = value1;
                ss[S_f21_hi] = value2;
                break;
            case 22:
                ss[S_f22_lo] = value1;
                ss[S_f22_hi] = value2;
                break;
            case 23:
                ss[S_f23_lo] = value1;
                ss[S_f23_hi] = value2;
                break;
            case 24:
                ss[S_f24_lo] = value1;
                ss[S_f24_hi] = value2;
                break;
            case 25:
                ss[S_f25_lo] = value1;
                ss[S_f25_hi] = value2;
                break;
            case 26:
                ss[S_f26_lo] = value1;
                ss[S_f26_hi] = value2;
                break;
            case 27:
                ss[S_f27_lo] = value1;
                ss[S_f27_hi] = value2;
                break;
            case 28:
                ss[S_f28_lo] = value1;
                ss[S_f28_hi] = value2;
                break;
            case 29:
                ss[S_f29_lo] = value1;
                ss[S_f29_hi] = value2;
                break;
            case 30:
                ss[S_f30_lo] = value1;
                ss[S_f30_hi] = value2;
                break;
            case 31:
                ss[S_f31_lo] = value1;
                ss[S_f31_hi] = value2;
                break;
            }
            fval = 0;
            continue;
        }

        /* ordinary "name = 0xvalue" line */
        strip_comma(clean_line(buf));
        p = strstr(buf, " = ");
        extract_hex(p, &value1, NULLCHAR, FALSE);

        if (strstr(buf, "caller_unat = ")) { ss[S_caller_unat] = value1; }
        if (strstr(buf, "ar_fpsr = ")) { ss[S_ar_fpsr] = value1; }
        if (strstr(buf, "r4 = ")) { ss[S_r4] = value1; }
        if (strstr(buf, "r5 = ")) { ss[S_r5] = value1; }
        if (strstr(buf, "r6 = ")) { ss[S_r6] = value1; }
        if (strstr(buf, "r7 = ")) { ss[S_r7] = value1; }
        if (strstr(buf, "b0 = ")) { ss[S_b0] = value1; }
        if (strstr(buf, "b1 = ")) { ss[S_b1] = value1; }
        if (strstr(buf, "b2 = ")) { ss[S_b2] = value1; }
        if (strstr(buf, "b3 = ")) { ss[S_b3] = value1; }
        if (strstr(buf, "b4 = ")) { ss[S_b4] = value1; }
        if (strstr(buf, "b5 = ")) { ss[S_b5] = value1; }
        if (strstr(buf, "ar_pfs = ")) { ss[S_ar_pfs] = value1; }
        if (strstr(buf, "ar_lc = ")) { ss[S_ar_lc] = value1; }
        if (strstr(buf, "ar_unat = ")) { ss[S_ar_unat] = value1; }
        if (strstr(buf, "ar_rnat = ")) { ss[S_ar_rnat] = value1; }
        if (strstr(buf, "ar_bspstore = ")) { ss[S_ar_bspstore] = value1; }
        if (strstr(buf, "pr = ")) { ss[S_pr] = value1; }
    }

    close_tmpfile();

    fprintf(fp, "SWITCH_STACK: %lx\n", addr);

    fprintf(fp, " B0: %016lx B1: %016lx\n", ss[S_b0], ss[S_b1]);
    fprintf(fp, " B2: %016lx B3: %016lx\n", ss[S_b2], ss[S_b3]);
    fprintf(fp, " B4: %016lx B5: %016lx\n", ss[S_b4], ss[S_b5]);
    fprintf(fp, " AR_PFS: %016lx AR_LC: %016lx\n",
        ss[S_ar_pfs], ss[S_ar_lc]);
    fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n",
        ss[S_ar_unat], ss[S_ar_rnat]);
    fprintf(fp, " PR: %016lx AR_BSPSTORE: %016lx\n",
        ss[S_pr], ss[S_ar_bspstore]);
    fprintf(fp, " AR_FPSR: %016lx CALLER_UNAT: %016lx\n",
        ss[S_ar_fpsr], ss[S_caller_unat]);
    fprintf(fp, " R4: %016lx R5: %016lx\n", ss[S_r4], ss[S_r5]);
    fprintf(fp, " R6: %016lx R7: %016lx\n", ss[S_r6], ss[S_r7]);
    fprintf(fp, " F2: %05lx%016lx ", ss[S_f2_hi], ss[S_f2_lo]);
    fprintf(fp, " F3: %05lx%016lx\n", ss[S_f3_hi], ss[S_f3_lo]);
    fprintf(fp, " F4: %05lx%016lx ", ss[S_f4_hi], ss[S_f4_lo]);
    fprintf(fp, " F5: %05lx%016lx\n", ss[S_f5_hi], ss[S_f5_lo]);
    fprintf(fp, " F10: %05lx%016lx ", ss[S_f10_hi], ss[S_f10_lo]);
    fprintf(fp, " F11: %05lx%016lx\n", ss[S_f11_hi], ss[S_f11_lo]);
    fprintf(fp, " F12: %05lx%016lx ", ss[S_f12_hi], ss[S_f12_lo]);
    fprintf(fp, " F13: %05lx%016lx\n", ss[S_f13_hi], ss[S_f13_lo]);
    fprintf(fp, " F14: %05lx%016lx ", ss[S_f14_hi], ss[S_f14_lo]);
    fprintf(fp, " F15: %05lx%016lx\n", ss[S_f15_hi], ss[S_f15_lo]);
    fprintf(fp, " F16: %05lx%016lx ", ss[S_f16_hi], ss[S_f16_lo]);
    fprintf(fp, " F17: %05lx%016lx\n", ss[S_f17_hi], ss[S_f17_lo]);
    fprintf(fp, " F18: %05lx%016lx ", ss[S_f18_hi], ss[S_f18_lo]);
    fprintf(fp, " F19: %05lx%016lx\n", ss[S_f19_hi], ss[S_f19_lo]);
    fprintf(fp, " F20: %05lx%016lx ", ss[S_f20_hi], ss[S_f20_lo]);
    fprintf(fp, " F21: %05lx%016lx\n", ss[S_f21_hi], ss[S_f21_lo]);
    fprintf(fp, " F22: %05lx%016lx ", ss[S_f22_hi], ss[S_f22_lo]);
    fprintf(fp, " F23: %05lx%016lx\n", ss[S_f23_hi], ss[S_f23_lo]);
    fprintf(fp, " F24: %05lx%016lx ", ss[S_f24_hi], ss[S_f24_lo]);
    fprintf(fp, " F25: %05lx%016lx\n", ss[S_f25_hi], ss[S_f25_lo]);
    fprintf(fp, " F26: %05lx%016lx ", ss[S_f26_hi], ss[S_f26_lo]);
    fprintf(fp, " F27: %05lx%016lx\n", ss[S_f27_hi], ss[S_f27_lo]);
    fprintf(fp, " F28: %05lx%016lx ", ss[S_f28_hi], ss[S_f28_lo]);
    fprintf(fp, " F29: %05lx%016lx\n", ss[S_f29_hi], ss[S_f29_lo]);
    fprintf(fp, " F30: %05lx%016lx ", ss[S_f30_hi], ss[S_f30_lo]);
    fprintf(fp, " F31: %05lx%016lx\n", ss[S_f31_hi], ss[S_f31_lo]);
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
ia64_get_smp_cpus(void)
{
    int cpus;

    /* the highest online cpu number may exceed the online count */
    if ((cpus = get_cpus_online()))
        return MAX(cpus, get_highest_cpu_online()+1);
    else
        return kt->cpus;
}

/*
 * Machine dependent command.
 *
 *  -c  dump per-cpu cpuinfo_ia64 data
 *  -m  dump the EFI memory map
 *  -x  hexadecimal output radix (mutually exclusive with -d)
 *  -d  decimal output radix (mutually exclusive with -x)
 *
 * With no options, display general machine statistics.
 */
void
ia64_cmd_mach(void)
{
    int c, cflag, mflag;
    unsigned int radix;

    cflag = mflag = radix = 0;

    while ((c = getopt(argcnt, args, "cmxd")) != EOF) {
        switch(c)
        {
        case 'c':
            cflag++;
            break;
        case 'm':
            mflag++;
            ia64_display_memmap();
            break;
        case 'x':
            if (radix == 10)
                error(FATAL, "-d and -x are mutually exclusive\n");
            radix = 16;
            break;
        case 'd':
            if (radix == 16)
                error(FATAL, "-d and -x are mutually exclusive\n");
            radix = 10;
            break;
        default:
            argerrs++;
            break;
        }
    }

    if (argerrs)
        cmd_usage(pc->curcmd, SYNOPSIS);

    if (cflag)
        ia64_display_cpu_data(radix);

    if (!cflag && !mflag)
        ia64_display_machine_stats();
}

/*
 * "mach" command output.
 */

/*
 * Display general machine statistics: machine type, memory size, cpu
 * count, processor speed, HZ, page/stack sizes, and the ia64 virtual
 * address region bases.
 */
static void
ia64_display_machine_stats(void)
{
    struct new_utsname *uts;
    char buf[BUFSIZE];
    ulong mhz;

    uts = &kt->utsname;

    fprintf(fp, " MACHINE TYPE: %s\n", uts->machine);
    fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf));
    fprintf(fp, " CPUS: %d\n", kt->cpus);
    if (!STREQ(kt->hypervisor, "(undetermined)") &&
        !STREQ(kt->hypervisor, "bare hardware"))
        fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor);
    fprintf(fp, " PROCESSOR SPEED: ");
    if ((mhz = machdep->processor_speed()))
        fprintf(fp, "%ld Mhz\n", mhz);
    else
        fprintf(fp, "(unknown)\n");
    fprintf(fp, " HZ: %d\n", machdep->hz);
    fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
// fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size());
    fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE());
    fprintf(fp, " KERNEL CACHED REGION: %lx\n",
        (ulong)KERNEL_CACHED_REGION << REGION_SHIFT);
    fprintf(fp, " KERNEL UNCACHED REGION: %lx\n",
        (ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT);
    fprintf(fp, " KERNEL VMALLOC REGION: %lx\n",
        (ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT);
    /* NOTE(review): the "USER DATA/STACK REGION" label is printed for
       both the stack and data regions -- presumably intentional since
       they share semantics, but worth confirming */
    fprintf(fp, " USER DATA/STACK REGION: %lx\n",
        (ulong)USER_STACK_REGION << REGION_SHIFT);
    fprintf(fp, " USER DATA/STACK REGION: %lx\n",
        (ulong)USER_DATA_REGION << REGION_SHIFT);
    fprintf(fp, " USER TEXT REGION: %lx\n",
        (ulong)USER_TEXT_REGION << REGION_SHIFT);
    fprintf(fp, " USER SHARED MEMORY REGION: %lx\n",
        (ulong)USER_SHMEM_REGION << REGION_SHIFT);
    fprintf(fp, "USER IA32 EMULATION REGION: %016lx\n",
        (ulong)USER_IA32_EMUL_REGION << REGION_SHIFT);
}

/*
 * Dump the cpuinfo_ia64 structure for each cpu, in the given radix.
 * When the per-cpu/array location of the structures is unknown, only
 * the boot cpu's data (cached in machspec->cpu_data_address) is shown.
 */
static void
ia64_display_cpu_data(unsigned int radix)
{
    int cpu;
    ulong cpu_data;
    int array_location_known;
    struct syment *sp;

    if (!(cpu_data = machdep->machspec->cpu_data_address)) {
        error(FATAL, "cannot find cpuinfo_ia64 location\n");
        return;
    }

    array_location_known = per_cpu_symbol_search("per_cpu__cpu_info") ||
        symbol_exists("cpu_data") || symbol_exists("_cpu_data");

    for (cpu = 0; cpu < kt->cpus; cpu++) {
        fprintf(fp, "%sCPU %d: %s\n", cpu ? "\n" : "", cpu,
            array_location_known ? "" : "(boot)");
        dump_struct("cpuinfo_ia64", cpu_data, radix);

        if (!array_location_known)
            break;

        if ((sp = per_cpu_symbol_search("per_cpu__cpu_info"))) {
            /* compute the NEXT cpu's address at loop bottom;
               NOTE(review): on the final iteration this reads
               __per_cpu_offset[cpu+1] -- assumes the offset array
               has more than kt->cpus entries (NR_CPUS-sized) */
            if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
                cpu_data = sp->value +
                    kt->__per_cpu_offset[cpu+1];
            else
                break;   /* we've already done cpu 0 */
        } else
            cpu_data += SIZE(cpuinfo_ia64);
    }
}

/*
 * Dump the EFI memory map.
 *
 * Walks the descriptor array cached by ia64_create_memmap(), printing
 * each descriptor's physical range, type, attribute flags, usability,
 * and mapped kernel region.  With CRASHDEBUG(1), a second, verbose
 * record is printed per descriptor.
 */
static void
ia64_display_memmap(void)
{
    int i, others;
    struct efi_memory_desc_t *desc;
    struct machine_specific *ms;
    char *map;

    ms = &ia64_machine_specific;
    map = ms->ia64_memmap;

    if (!map) {
        check_mem_limit();
        error(FATAL, "efi_mmap not accessible\n");
    }

    fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE / ATTRIBUTE / [ACCESS]\n");

    for (i = 0; i < ms->efi_memmap_size/ms->efi_memdesc_size; i++) {
        desc = (struct efi_memory_desc_t *)map;
        fprintf(fp, "%016lx - %016lx ",
            desc->phys_addr, desc->phys_addr +
            (desc->num_pages * (1 << EFI_PAGE_SHIFT)));
        switch (desc->type)
        {
        case EFI_RESERVED_TYPE:
            fprintf(fp, "%s", "RESERVED_TYPE");
            break;
        case EFI_LOADER_CODE:
            fprintf(fp, "%s", "LOADER_CODE");
            break;
        case EFI_LOADER_DATA:
            fprintf(fp, "%s", "LOADER_DATA");
            break;
        case EFI_BOOT_SERVICES_CODE:
            fprintf(fp, "%s", "BOOT_SERVICES_CODE");
            break;
        case EFI_BOOT_SERVICES_DATA:
            fprintf(fp, "%s", "BOOT_SERVICES_DATA");
            break;
        case EFI_RUNTIME_SERVICES_CODE:
            fprintf(fp, "%s", "RUNTIME_SERVICES_CODE");
            break;
        case EFI_RUNTIME_SERVICES_DATA:
            fprintf(fp, "%s", "RUNTIME_SERVICES_DATA");
            break;
        case EFI_CONVENTIONAL_MEMORY:
            fprintf(fp, "%s", "CONVENTIONAL_MEMORY");
            break;
        case EFI_UNUSABLE_MEMORY:
            fprintf(fp, "%s", "UNUSABLE_MEMORY");
            break;
        case EFI_ACPI_RECLAIM_MEMORY:
            fprintf(fp, "%s", "ACPI_RECLAIM_MEMORY");
            break;
        case EFI_ACPI_MEMORY_NVS:
            fprintf(fp, "%s", "ACPI_MEMORY_NVS");
            break;
        case EFI_MEMORY_MAPPED_IO:
            fprintf(fp, "%s", "MEMORY_MAPPED_IO");
            break;
        case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
            fprintf(fp, "%s", "MEMORY_MAPPED_IO_PORT_SPACE");
            break;
        case EFI_PAL_CODE:
            fprintf(fp, "%s", "PAL_CODE");
            break;
        default:
            fprintf(fp, "%s", "(unknown type)");
            break;
        }
        fprintf(fp, " ");
        /* attribute flags, "|"-separated */
        others = 0;
        if (desc->attribute & EFI_MEMORY_UC)
            fprintf(fp, "%sUC", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WC)
            fprintf(fp, "%sWC", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WT)
            fprintf(fp, "%sWT", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WB)
            fprintf(fp, "%sWB", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WP)
            fprintf(fp, "%sWP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_RP)
            fprintf(fp, "%sRP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_XP)
            fprintf(fp, "%sXP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_RUNTIME)
            fprintf(fp, "%sRUNTIME", others++ ? "|" : "");
        fprintf(fp, " %s",
            ia64_available_memory(desc) ? "[available]" : "");
        switch (VADDR_REGION(desc->virt_addr))
        {
        case KERNEL_UNCACHED_REGION:
            fprintf(fp, "[R6]\n");
            break;
        case KERNEL_CACHED_REGION:
            fprintf(fp, "[R7]\n");
            break;
        default:
            fprintf(fp, "\n");
        }

        if (!CRASHDEBUG(1))
            goto next_desc;

        /* verbose per-descriptor record (debug only) */
        fprintf(fp,
            "physical: %016lx %dk pages: %ld virtual: %016lx\n",
            desc->phys_addr, (1 << EFI_PAGE_SHIFT)/1024,
            desc->num_pages, desc->virt_addr);
        fprintf(fp, "type: ");
        switch (desc->type)
        {
        case EFI_RESERVED_TYPE:
            fprintf(fp, "%-27s", "RESERVED_TYPE");
            break;
        case EFI_LOADER_CODE:
            fprintf(fp, "%-27s", "LOADER_CODE");
            break;
        case EFI_LOADER_DATA:
            fprintf(fp, "%-27s", "LOADER_DATA");
            break;
        case EFI_BOOT_SERVICES_CODE:
            fprintf(fp, "%-27s", "BOOT_SERVICES_CODE");
            break;
        case EFI_BOOT_SERVICES_DATA:
            fprintf(fp, "%-27s", "BOOT_SERVICES_DATA");
            break;
        case EFI_RUNTIME_SERVICES_CODE:
            fprintf(fp, "%-27s", "RUNTIME_SERVICES_CODE");
            break;
        case EFI_RUNTIME_SERVICES_DATA:
            fprintf(fp, "%-27s", "RUNTIME_SERVICES_DATA");
            break;
        case EFI_CONVENTIONAL_MEMORY:
            fprintf(fp, "%-27s", "CONVENTIONAL_MEMORY");
            break;
        case EFI_UNUSABLE_MEMORY:
            fprintf(fp, "%-27s", "UNUSABLE_MEMORY");
            break;
        case EFI_ACPI_RECLAIM_MEMORY:
            fprintf(fp, "%-27s", "ACPI_RECLAIM_MEMORY");
            break;
        case EFI_ACPI_MEMORY_NVS:
            fprintf(fp, "%-27s", "ACPI_MEMORY_NVS");
            break;
        case EFI_MEMORY_MAPPED_IO:
            fprintf(fp, "%-27s", "MEMORY_MAPPED_IO");
            break;
        case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
            fprintf(fp, "%-27s", "MEMORY_MAPPED_IO_PORT_SPACE");
            break;
        case EFI_PAL_CODE:
            fprintf(fp, "%-27s", "PAL_CODE");
            break;
        default:
            fprintf(fp, "%-27s", "(unknown type)");
            break;
        }
        fprintf(fp, " attribute: (");
        others = 0;
        if (desc->attribute & EFI_MEMORY_UC)
            fprintf(fp, "%sUC", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WC)
            fprintf(fp, "%sWC", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WT)
            fprintf(fp, "%sWT", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WB)
            fprintf(fp, "%sWB", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_WP)
            fprintf(fp, "%sWP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_RP)
            fprintf(fp, "%sRP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_XP)
            fprintf(fp, "%sXP", others++ ? "|" : "");
        if (desc->attribute & EFI_MEMORY_RUNTIME)
            fprintf(fp, "%sRUNTIME", others++ ? "|" : "");
        fprintf(fp, ") %s\n",
            ia64_available_memory(desc) ? "[available]" : "");

next_desc:
        map += ms->efi_memdesc_size;
    }
}

/*
 * Returns TRUE if an EFI memory descriptor covers write-back cacheable
 * memory of a type that the kernel can actually use as RAM.
 */
static int
ia64_available_memory(struct efi_memory_desc_t *desc)
{
    if (desc->attribute & EFI_MEMORY_WB) {
        switch (desc->type)
        {
        case EFI_LOADER_CODE:
        case EFI_LOADER_DATA:
        case EFI_BOOT_SERVICES_CODE:
        case EFI_BOOT_SERVICES_DATA:
        case EFI_CONVENTIONAL_MEMORY:
            return TRUE;
        }
    }
    return FALSE;
}

/*
 * Make a copy of the memmap descriptor array.
*/ static void ia64_create_memmap(void) { struct machine_specific *ms; uint64_t ia64_boot_param, efi_memmap; ulong num_physpages; char *memmap; ms = &ia64_machine_specific; ms->ia64_memmap = NULL; if (symbol_exists("num_physpages")) { get_symbol_data("num_physpages", sizeof(ulong), &num_physpages); machdep->memsize = num_physpages * PAGESIZE(); } if (!symbol_exists("ia64_boot_param")) return; if ((ms->mem_limit = check_mem_limit())) machdep->flags |= MEM_LIMIT; get_symbol_data("ia64_boot_param", sizeof(void *), &ia64_boot_param); if ((ms->mem_limit && (ia64_VTOP(ia64_boot_param) >= ms->mem_limit)) || !readmem(ia64_boot_param+ MEMBER_OFFSET("ia64_boot_param", "efi_memmap"), KVADDR, &efi_memmap, sizeof(uint64_t), "efi_memmap", QUIET|RETURN_ON_ERROR)) { if (!XEN() || CRASHDEBUG(1)) error(WARNING, "cannot read ia64_boot_param: " "memory verification will not be performed\n\n"); return; } readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param", "efi_memmap_size"), KVADDR, &ms->efi_memmap_size, sizeof(uint64_t), "efi_memmap_size", FAULT_ON_ERROR); readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param", "efi_memdesc_size"), KVADDR, &ms->efi_memdesc_size, sizeof(uint64_t), "efi_memdesc_size", FAULT_ON_ERROR); if (!(memmap = (char *) malloc(ms->efi_memmap_size))) { error(WARNING, "cannot malloc ia64_memmap\n"); return; } if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) || !readmem(PTOV(efi_memmap), KVADDR, memmap, ms->efi_memmap_size, "efi_mmap contents", QUIET|RETURN_ON_ERROR)) { if (!XEN() || (XEN() && CRASHDEBUG(1))) error(WARNING, "cannot read efi_mmap: " "EFI memory verification will not be performed\n\n"); free(memmap); return; } ms->ia64_memmap = memmap; } /* * Kernel pages may cross EFI memmap boundaries, so the system page is * broken into EFI pages, and then each of them is verified. 
*/ static int ia64_verify_paddr(uint64_t paddr) { int i, j, cnt, found, desc_count, desc_size; struct efi_memory_desc_t *desc; struct machine_specific *ms; uint64_t phys_end; char *map; int efi_pages; ulong efi_pagesize; /* * When kernel text and data are mapped in region 5, * and we're using the crash memory device driver, * then the driver will gracefully fail the read attempt * if the address is bogus. */ if ((VADDR_REGION(paddr) == KERNEL_VMALLOC_REGION) && (pc->flags & MEMMOD)) return TRUE; ms = &ia64_machine_specific; if (ms->ia64_memmap == NULL) return TRUE; desc_count = ms->efi_memmap_size/ms->efi_memdesc_size; desc_size = ms->efi_memdesc_size; efi_pagesize = (1 << EFI_PAGE_SHIFT); efi_pages = PAGESIZE() / efi_pagesize; paddr = PAGEBASE(paddr); for (i = cnt = 0; i < efi_pages; i++, paddr += efi_pagesize) { map = ms->ia64_memmap; for (j = found = 0; j < desc_count; j++) { desc = (struct efi_memory_desc_t *)map; if (ia64_available_memory(desc)) { phys_end = desc->phys_addr + (desc->num_pages * efi_pagesize); if ((paddr >= desc->phys_addr) && ((paddr + efi_pagesize) <= phys_end)) { cnt++; found = TRUE; } } if (found) break; map += desc_size; } } return (cnt == efi_pages); } /* * Check whether a "mem=X" argument was entered on the boot command line. * Note that the default setting of the kernel mem_limit is ~0UL. 
 */

/*
 * Returns the effective memory limit in bytes (kernel mem_limit + 1)
 * when a "mem=" restriction is in effect, or 0 when there is none.
 * Also reports the restriction to the user, quoting the boot command
 * line argument when it can be recovered.
 */
static ulong
check_mem_limit(void)
{
    ulong mem_limit;
    char *saved_command_line, *p1, *p2;
    int len;

    if (!symbol_exists("mem_limit"))
        return 0;

    get_symbol_data("mem_limit", sizeof(ulong), &mem_limit);

    /* the kernel default (~0UL) means "no limit" */
    if (mem_limit == ~0UL)
        return 0;

    /* mem_limit is the highest usable address; convert to a size */
    mem_limit += 1;

    if (!symbol_exists("saved_command_line"))
        goto no_command_line;

    len = get_array_length("saved_command_line", 0, sizeof(char));
    if (!len)
        goto no_command_line;

    /* GETBUF storage -- presumably reclaimed by the command cleanup
       machinery rather than freed here; TODO confirm */
    saved_command_line = GETBUF(len+1);

    if (!readmem(symbol_value("saved_command_line"), KVADDR,
        saved_command_line, len, "saved_command_line",
        RETURN_ON_ERROR))
        goto no_command_line;

    if (!(p1 = strstr(saved_command_line, "mem=")))
        goto no_command_line;

    /* terminate the "mem=..." token at the next whitespace */
    p2 = p1;
    while (*p2 && !whitespace(*p2))
        p2++;
    *p2 = NULLCHAR;

    error(pc->flags & RUNTIME ? INFO : WARNING,
        "boot command line argument: %s\n", p1);

    return mem_limit;

no_command_line:
    error(pc->flags & RUNTIME ? INFO : WARNING,
        "boot command line memory limit: %lx\n", mem_limit);

    return mem_limit;
}

#ifndef _ASM_IA64_UNWIND_H
#define _ASM_IA64_UNWIND_H

/*
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang
 *
 * A simple API for unwinding kernel stacks. This is used for
 * debugging and error reporting purposes. The kernel doesn't need
 * full-blown stack unwinding with all the bells and whistles, so there
 * is not much point in implementing the full IA-64 unwind API (though
 * it would of course be possible to implement the kernel API on top
 * of it).
 */

struct task_struct; /* forward declaration */
struct switch_stack; /* forward declaration */

enum unw_application_register {
    UNW_AR_BSP,
    UNW_AR_BSPSTORE,
    UNW_AR_PFS,
    UNW_AR_RNAT,
    UNW_AR_UNAT,
    UNW_AR_LC,
    UNW_AR_EC,
    UNW_AR_FPSR,
    UNW_AR_RSC,
    UNW_AR_CCV
};

/*
 * The following declarations are private to the unwind
 * implementation:
 */

struct unw_stack {
    unsigned long limit;
    unsigned long top;
};

#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0)

/*
 * No user of this module should ever access this structure directly
 * as it is subject to change. It is declared here solely so we can
 * use automatic variables.
 */
struct unw_frame_info {
    struct unw_stack regstk;
    struct unw_stack memstk;
    unsigned int flags;
    short hint;
    short prev_script;
    unsigned long bsp;
    unsigned long sp; /* stack pointer */
    unsigned long psp; /* previous sp */
    unsigned long ip; /* instruction pointer */
    unsigned long pr_val; /* current predicates */
    unsigned long *cfm;
    struct task_struct *task;
    struct switch_stack *sw;

    /* preserved state: */
    unsigned long *pbsp; /* previous bsp */
    unsigned long *bspstore;
    unsigned long *pfs;
    unsigned long *rnat;
    unsigned long *rp;
    unsigned long *pri_unat;
    unsigned long *unat;
    unsigned long *pr;
    unsigned long *lc;
    unsigned long *fpsr;
    struct unw_ireg {
        unsigned long *loc;
        struct unw_ireg_nat {
            int type : 3; /* enum unw_nat_type */
            signed int off; /* NaT word is at loc+nat.off */
        } nat;
    } r4, r5, r6, r7;
    unsigned long *b1, *b2, *b3, *b4, *b5;
    struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16];
};

#endif /* _ASM_UNWIND_H */

/*
 * Perform any leftover pre-prompt machine-specific initialization tasks here.
*/ static void ia64_post_init(void) { struct machine_specific *ms; struct gnu_request req; struct syment *sp; ulong flag; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (!VALID_STRUCT(cpuinfo_ia64)) error(WARNING, "cpuinfo_ia64 structure does not exist\n"); else { if (symbol_exists("_cpu_data")) ms->cpu_data_address = symbol_value("_cpu_data"); else if (symbol_exists("boot_cpu_data")) get_symbol_data("boot_cpu_data", sizeof(ulong), &ms->cpu_data_address); else if (symbol_exists("cpu_data")) ms->cpu_data_address = symbol_value("cpu_data"); else if ((sp = per_cpu_symbol_search("per_cpu__cpu_info")) || (sp = per_cpu_symbol_search("per_cpu__ia64_cpu_info"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) ms->cpu_data_address = sp->value + kt->__per_cpu_offset[0]; else ms->cpu_data_address = sp->value; } else { error(WARNING, "cannot find cpuinfo_ia64 location\n"); ms->cpu_data_address = 0; } if (ms->cpu_data_address) { if (VALID_MEMBER(cpuinfo_ia64_unimpl_va_mask)) readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_va_mask), KVADDR, &ms->unimpl_va_mask, sizeof(ulong), "unimpl_va_mask", FAULT_ON_ERROR); if (VALID_MEMBER(cpuinfo_ia64_unimpl_pa_mask)) 
readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_pa_mask), KVADDR, &ms->unimpl_pa_mask, sizeof(ulong), "unimpl_pa_mask", FAULT_ON_ERROR); } } if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { get_symbol_type("ia64_init_stack", NULL, &req); ms->ia64_init_stack_size = req.length; } if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK()))) machdep->flags |= INIT; if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack())) machdep->flags |= flag; } /* * Try using the old unwind scheme if the new one fails, * that is as long as the unw_frame_info structs are the * same size. */ static void try_old_unwind(struct bt_info *bt) { if ((machdep->flags & NEW_UNWIND) && (STRUCT_SIZE("unw_frame_info") == sizeof(struct unw_frame_info))) { error(INFO, "unwind: trying old unwind mechanism\n"); ia64_old_unwind(bt); } } /* * Unwind the stack using the basic method used when CONFIG_IA64_NEW_UNWIND * is not configured into the kernel. * * NOTE: see kernel source: show_stack() and/or kdba_bt_stack() */ static void ia64_old_unwind_init(void) { long len; len = STRUCT_SIZE("unw_frame_info"); if (len < 0) { error(WARNING, "cannot determine size of unw_frame_info\n"); machdep->flags |= UNW_OUT_OF_SYNC; } else if (len != sizeof(struct unw_frame_info)) { error(WARNING, "unw_frame_info size differs: %ld (local: %d)\n", len, sizeof(struct unw_frame_info)); machdep->flags |= UNW_OUT_OF_SYNC; } } static int unw_debug; /* debug fprintf indent */ static void ia64_old_unwind(struct bt_info *bt) { struct unw_frame_info unw_frame_info, *info; struct syment *sm; int frame; char *name; if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); if (CRASHDEBUG(1)) unw_debug = 0; info = &unw_frame_info; unw_init_from_blocked_task(info, bt->task); frame = 0; do { if (info->ip == 0) break; if (!IS_KVADDR(info->ip)) break; if ((sm = value_search(info->ip, NULL))) name = sm->name; else name = "(unknown)"; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & 
(BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str)) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == info->ip) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; } } else { fprintf(fp, "%s#%d [BSP:%lx] %s at %lx\n", frame >= 10 ? "" : " ", frame, info->bsp, name, info->ip); if (bt->flags & BT_FULL) rse_function_params(info, name); if (bt->flags & BT_LINE_NUMBERS) ia64_dump_line_number(info->ip); } frame++; if (CRASHDEBUG(1)) unw_debug = 0; if (STREQ(name, "start_kernel")) break; } while (old_unw_unwind(info) >= 0); unwind_return: if (!BT_REFERENCE_CHECK(bt) && !is_kernel_thread(bt->task)) ia64_exception_frame(bt->stacktop - SIZE(pt_regs), bt); if (bt->debug) CRASHDEBUG_RESTORE(); } static unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Given a bsp address and a number of register locations, calculate a new * bsp address, accounting for any intervening RNAT stores. */ static unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: ia64_rse_slot_num(%lx): %ld num_regs: %ld\n", space(unw_debug), (ulong)addr, ia64_rse_slot_num(addr), num_regs); } if (num_regs < 0) delta -= 0x3e; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: delta: %ld return(%lx)", space(unw_debug), delta, (ulong)(addr + num_regs + delta/0x3f)); if (addr > (addr + num_regs + delta/0x3f)) fprintf(fp, "(-%ld)\n", addr - (addr + num_regs + delta/0x3f)); else fprintf(fp, "(+%ld)\n", (addr + num_regs + delta/0x3f) - addr); } return(addr + num_regs + delta/0x3f); } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. 
*/

/*
 * Return the address of the RNAT collection slot that covers SLOT_ADDR:
 * the RSE stores one RNAT word in every 64th (8-byte) slot, i.e. at the
 * address whose low slot-index bits are all ones.
 */
static unsigned long *
ia64_rse_rnat_addr (unsigned long *slot_addr)
{
	return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}

/*
 * Initialize the key fields in the unw_frame_info structure.
 *
 * NOTE: see kernel source: unw_init_from_blocked_task()
 */
static void
unw_init_from_blocked_task(struct unw_frame_info *info, ulong task)
{
	ulong sw;
	ulong sol, limit, top;
	ulong ar_pfs, ar_bspstore, b0;

	/* Kernel address of the blocked task's saved switch_stack. */
	sw = SWITCH_STACK_ADDR(task);
	BZERO(info, sizeof(struct unw_frame_info));

	/*
	 * Pull the return pointer (b0), previous function state (ar.pfs)
	 * and RSE backing store pointer out of the saved switch_stack.
	 */
	readmem(sw + OFFSET(switch_stack_b0), KVADDR, &b0,
		sizeof(ulong), "switch_stack b0", FAULT_ON_ERROR);
	readmem(sw + OFFSET(switch_stack_ar_pfs), KVADDR, &ar_pfs,
		sizeof(ulong), "switch_stack ar_pfs", FAULT_ON_ERROR);
	readmem(sw + OFFSET(switch_stack_ar_bspstore), KVADDR, &ar_bspstore,
		sizeof(ulong), "switch_stack ar_bspstore", FAULT_ON_ERROR);

	sol = (ar_pfs >> 7) & 0x7f;	/* size of locals */
	limit = task + IA64_RBS_OFFSET;
	top = ar_bspstore;
	/* Clamp a bspstore that falls outside the task's stack region. */
	if ((top - task) >= IA64_STK_OFFSET)
		top = limit;

	if (CRASHDEBUG(1)) {
		unw_debug++;
		fprintf(fp,
		    "unw_init_from_blocked_task: stack top: %lx sol: %ld\n",
			top, sol);
	}

	info->regstk.limit = limit;
	info->regstk.top = top;
	info->sw = (struct switch_stack *)sw;
	/* bsp: back up over the current frame's locals to reach r32. */
	info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->regstk.top, -sol);
	/* cfm points at the saved ar.pfs word within the switch_stack. */
	info->cfm = (ulong *)(sw + OFFSET(switch_stack_ar_pfs));
	info->ip = b0;

	if (CRASHDEBUG(1))
		dump_unw_frame_info(info);
}

/*
 * Update the unw_frame_info structure based upon its current state.
 * This routine works without enabling CONFIG_IA64_NEW_UNWIND because
 * gdb allocates two additional "local" register locations for each
 * function, found at the end of the stored locals:
 *
 *	register "sol-1" (last local) = ar.pfs (gives us previous sol)
 *	register "sol-2" (2nd to last local) = b0 of previous address
 *
 * NOTE: see kernel source: unw_unwind() (#ifndef CONFIG_IA64_NEW_UNWIND)
 * On entry, info->regstk.top should point to the register backing
 * store for r32.
*/
static int
old_unw_unwind (struct unw_frame_info *info)
{
	unsigned long sol, cfm;
	int is_nat;

	/* Read the current frame's CFM word from the dump; a failed read
	 * (quietly) terminates the unwind. */
	if (!readmem((ulong)info->cfm, KVADDR, &cfm,
	    sizeof(long), "info->cfm", QUIET|RETURN_ON_ERROR))
		return -1;

	sol = (cfm >> 7) & 0x7f;	/* size of locals */

	if (CRASHDEBUG(1)) {
		fprintf(fp, "old_unw_unwind: cfm: %lx sol: %ld\n", cfm, sol);
		unw_debug++;
	}

	/*
	 * In general, we would have to make use of unwind info to
	 * unwind an IA-64 stack, but for now gcc uses a special
	 * convention that makes this possible without full-fledged
	 * unwind info.  Specifically, we expect "rp" in the second
	 * last, and "ar.pfs" in the last local register, so the
	 * number of locals in a frame must be at least two.  If it's
	 * less than that, we reached the end of the C call stack.
	 */
	if (sol < 2)
		return -1;

	/* "rp": return pointer into the previous frame. */
	info->ip = rse_read_reg(info, sol - 2, &is_nat);
	if (CRASHDEBUG(1))
		fprintf(fp, "old_unw_unwind: ip: %lx\n", info->ip);
	/* Reject NaT'ed or obviously bad (unimplemented/misaligned)
	 * addresses. */
	if (is_nat || (info->ip & (machdep->machspec->unimpl_va_mask | 0xf)))
		return -1;

	/* "ar.pfs": previous frame state, stored in the last local. */
	info->cfm = ia64_rse_skip_regs((ulong *)info->bsp, sol - 1);
	cfm = rse_read_reg(info, sol - 1, &is_nat);
	if (CRASHDEBUG(1))
		fprintf(fp, "old_unw_unwind: info->cfm: %lx => %lx\n",
			(ulong)info->cfm, cfm);
	if (is_nat)
		return -1;

	/* Back bsp up over the previous frame's locals. */
	sol = (cfm >> 7) & 0x7f;
	info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->bsp, -sol);
	if (CRASHDEBUG(1)) {
		fprintf(fp, "old_unw_unwind: next sol: %ld\n", sol);
		fprintf(fp, "old_unw_unwind: next bsp: %lx\n", info->bsp);
	}

	return 0;

	/* Unreachable reference copy of the kernel's own routine,
	 * kept for comparison -- KERNEL_SOURCE is never defined here. */
#ifdef KERNEL_SOURCE
	unsigned long sol, cfm = *info->cfm;
	int is_nat;

	sol = (cfm >> 7) & 0x7f;	/* size of locals */

	/*
	 * In general, we would have to make use of unwind info to
	 * unwind an IA-64 stack, but for now gcc uses a special
	 * convention that makes this possible without full-fledged
	 * unwind info.  Specifically, we expect "rp" in the second
	 * last, and "ar.pfs" in the last local register, so the
	 * number of locals in a frame must be at least two.  If it's
	 * less than that, we reached the end of the C call stack.
*/ if (sol < 2) return -1; info->ip = rse_read_reg(info, sol - 2, &is_nat); if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf))) /* reject let obviously bad addresses */ return -1; info->cfm = ia64_rse_skip_regs((unsigned long *) info->bsp, sol - 1); cfm = rse_read_reg(info, sol - 1, &is_nat); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -sol); return 0; #endif /* KERNEL_SOURCE */ } /* * Retrieve a register value from the stack, returning its NAT attribute * as well. * * NOTE: see kernel source: read_reg() */ static ulong rse_read_reg (struct unw_frame_info *info, int regnum, int *is_nat) { ulong *addr, *rnat_addr, rnat; ulong regcontent; if (CRASHDEBUG(1)) { fprintf(fp, "%srse_read_reg: bsp: %lx\n", space(unw_debug), info->bsp); unw_debug++; } addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum); if (CRASHDEBUG(1)) { unw_debug--; fprintf(fp, "%srse_read_reg: addr: %lx\n", space(unw_debug), (ulong)addr); } if (((ulong)addr < info->regstk.limit) || ((ulong)addr >= info->regstk.top) || (((long)addr & 0x7) != 0)) { *is_nat = 1; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: is_nat: %d -- return 0xdeadbeefdeadbeef\n", space(unw_debug), *is_nat); return 0xdeadbeefdeadbeef; } rnat_addr = ia64_rse_rnat_addr(addr); if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat_addr: %lx\n", space(unw_debug), (ulong)rnat_addr); if ((unsigned long) rnat_addr >= info->regstk.top) readmem((ulong)(info->sw) + OFFSET(switch_stack_ar_rnat), KVADDR, &rnat, sizeof(long), "info->sw->ar_rnat", FAULT_ON_ERROR); else readmem((ulong)rnat_addr, KVADDR, &rnat, sizeof(long), "rnat_addr", FAULT_ON_ERROR); *is_nat = (rnat & (1UL << ia64_rse_slot_num(addr))) != 0; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat: %lx is_nat: %d\n", space(unw_debug), rnat, *is_nat); readmem((ulong)addr, KVADDR, ®content, sizeof(long), "rse_read_reg addr", FAULT_ON_ERROR); if (CRASHDEBUG(1)) { char buf[BUFSIZE]; 
fprintf(fp, "%srse_read_reg: addr: %lx => %lx ", space(unw_debug), (ulong)addr, regcontent); if (is_kernel_text(regcontent)) fprintf(fp, "(%s)", value_to_symstr(regcontent, buf, pc->output_radix)); fprintf(fp, "\n"); } return regcontent; } /* * Display the arguments to a function, presuming that they are found at * the beginning of the sol section. */ #define MAX_REGISTER_PARAMS (8) static void rse_function_params(struct unw_frame_info *info, char *name) { int i; int numargs, is_nat[MAX_REGISTER_PARAMS]; char buf1[BUFSIZE], buf2[BUFSIZE], *p1, *p2; ulong arglist[MAX_REGISTER_PARAMS]; numargs = MIN(get_function_numargs(info->ip), MAX_REGISTER_PARAMS); if (CRASHDEBUG(1)) fprintf(fp, "rse_function_params: %s: %d args\n", name, numargs); switch (numargs) { case 0: fprintf(fp, " (void)\n"); return; case -1: return; default: break; } for (i = 0; i < numargs; i++) arglist[i] = rse_read_reg(info, i, &is_nat[i]); sprintf(buf1, " ("); for (i = 0; i < numargs; i++) { p1 = &buf1[strlen(buf1)]; if (is_nat[i]) sprintf(buf2, "[NAT]"); else { if ((p2 = value_symbol(arglist[i]))) sprintf(buf2, "%s", p2); else sprintf(buf2, "%lx", arglist[i]); } sprintf(p1, "%s%s", i ? 
", " : "", buf2); if (strlen(buf1) >= 80) sprintf(p1, ",\n %s", buf2); } strcat(buf1, ")\n"); fprintf(fp, "%s", buf1); } static void dump_unw_frame_info(struct unw_frame_info *info) { unw_debug++; fprintf(fp, "%sregstk.limit: %lx\n", space(unw_debug), info->regstk.limit); fprintf(fp, "%s regstk.top: %lx\n", space(unw_debug), info->regstk.top); fprintf(fp, "%s sw: %lx\n", space(unw_debug), (ulong)info->sw); fprintf(fp, "%s bsp: %lx\n", space(unw_debug), info->bsp); fprintf(fp, "%s cfm: %lx\n", space(unw_debug), (ulong)info->cfm); fprintf(fp, "%s ip: %lx\n", space(unw_debug), info->ip); unw_debug--; } static const char *hook_files[] = { "arch/ia64/kernel/entry.S", "arch/ia64/kernel/head.S", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook ia64_line_number_hooks[] = { {"ia64_execve", ENTRY_S}, {"sys_clone2", ENTRY_S}, {"sys_clone", ENTRY_S}, {"ia64_switch_to", ENTRY_S}, {"save_switch_stack", ENTRY_S}, {"load_switch_stack", ENTRY_S}, {"__ia64_syscall", ENTRY_S}, {"invoke_syscall_trace", ENTRY_S}, {"ia64_trace_syscall", ENTRY_S}, {"ia64_ret_from_clone", ENTRY_S}, {"ia64_ret_from_syscall", ENTRY_S}, {"ia64_leave_kernel", ENTRY_S}, {"handle_syscall_error", ENTRY_S}, {"invoke_schedule_tail", ENTRY_S}, {"invoke_schedule", ENTRY_S}, {"handle_signal_delivery", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"ia64_prepare_handle_unaligned", ENTRY_S}, {"unw_init_running", ENTRY_S}, {"_start", HEAD_S}, {"ia64_save_debug_regs", HEAD_S}, {"ia64_load_debug_regs", HEAD_S}, {"__ia64_save_fpu", HEAD_S}, {"__ia64_load_fpu", HEAD_S}, {"__ia64_init_fpu", HEAD_S}, {"ia64_switch_mode", HEAD_S}, {"ia64_set_b1", HEAD_S}, {"ia64_set_b2", HEAD_S}, {"ia64_set_b3", HEAD_S}, {"ia64_set_b4", HEAD_S}, {"ia64_set_b5", HEAD_S}, {"ia64_spinlock_contention", HEAD_S}, {NULL, NULL} /* list must be NULL-terminated */ }; void ia64_dump_line_number(ulong ip) { int retries; char buf[BUFSIZE], *p; retries = 0; 
try_closest: get_line_number(ip, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; ip = closest_symbol_value(ip); goto try_closest; } } } /* * For now, just make it a region 7 address for all cases, ignoring the * fact that it might be in a 2.6 kernel's non-unity mapped region. XXX */ ulong ia64_PTOV(ulong paddr) { ulong vaddr; switch (machdep->machspec->kernel_region) { case KERNEL_VMALLOC_REGION: // error(FATAL, "ia64_PTOV: TBD for kernels loaded in region 5\n"); default: case KERNEL_CACHED_REGION: vaddr = paddr + (ulong)(KERNEL_CACHED_BASE); } return vaddr; } /* * Account for 2.6 kernel mapping in region 5. */ ulong ia64_VTOP(ulong vaddr) { struct machine_specific *ms; ulong paddr; ms = &ia64_machine_specific; switch (VADDR_REGION(vaddr)) { case KERNEL_CACHED_REGION: paddr = vaddr - (ulong)(KERNEL_CACHED_BASE); break; case KERNEL_UNCACHED_REGION: paddr = vaddr - (ulong)(KERNEL_UNCACHED_BASE); break; /* * Differentiate between a 2.6 kernel address in region 5 and * a real vmalloc() address. */ case KERNEL_VMALLOC_REGION: /* * Real vmalloc() addresses should never be the subject * of a VTOP() translation. */ if (ia64_IS_VMALLOC_ADDR(vaddr) || (ms->kernel_region != KERNEL_VMALLOC_REGION)) return(error(FATAL, "ia64_VTOP(%lx): unexpected region 5 address\n", vaddr)); /* * If it's a region 5 kernel address, subtract the starting * kernel virtual address, and then add the base physical page. */ paddr = vaddr - ms->kernel_start + (ms->phys_start & KERNEL_TR_PAGE_MASK); break; default: return(error(FATAL, "ia64_VTOP(%lx): invalid kernel address\n", vaddr)); } return paddr; } /* * vmalloc() starting address is either the traditional 0xa000000000000000 or * bumped up in 2.6 to 0xa000000200000000. 
*/
int
ia64_IS_VMALLOC_ADDR(ulong vaddr)
{
	/* Region 5 address at or above the (version-dependent) vmalloc
	 * start, below the start of the uncached region. */
	return ((vaddr >= machdep->machspec->vmalloc_start) &&
		(vaddr < (ulong)KERNEL_UNCACHED_BASE));
}

/* qsort comparator: order vaddr_range entries by ascending start. */
static int
compare_kvaddr(const void *v1, const void *v2)
{
	struct vaddr_range *r1, *r2;

	r1 = (struct vaddr_range *)v1;
	r2 = (struct vaddr_range *)v2;

	return (r1->start < r2->start ? -1 :
		r1->start == r2->start ? 0 : 1);
}

/*
 * Fill vrp[] with this machine's kernel virtual address ranges, sorted
 * by start address; returns the number of entries stored (2 to 4).
 * The caller must supply an array large enough for all entry types.
 */
static int
ia64_get_kvaddr_ranges(struct vaddr_range *vrp)
{
	int cnt;

	cnt = 0;

	/* Unity (identity) mapping: identity_map_base .. high_memory. */
	vrp[cnt].type = KVADDR_UNITY_MAP;
	vrp[cnt].start = machdep->identity_map_base;
	vrp[cnt++].end = vt->high_memory;

	/* Separate kernel text/data mapping, when the kernel is not
	 * loaded at the identity map base. */
	if (machdep->machspec->kernel_start != machdep->identity_map_base) {
		vrp[cnt].type = KVADDR_START_MAP;
		vrp[cnt].start = machdep->machspec->kernel_start;
		vrp[cnt++].end = kt->end;
	}

	/* vmalloc range: up to the start of the uncached region. */
	vrp[cnt].type = KVADDR_VMALLOC;
	vrp[cnt].start = machdep->machspec->vmalloc_start;
	vrp[cnt++].end = (ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT;

	/* Virtual mem_map: only when node 0's mem_map lives in region 5. */
	if (VADDR_REGION(vt->node_table[0].mem_map) == KERNEL_VMALLOC_REGION) {
		vrp[cnt].type = KVADDR_VMEMMAP;
		vrp[cnt].start = vt->node_table[0].mem_map;
		vrp[cnt].end = vt->node_table[vt->numnodes-1].mem_map +
			(vt->node_table[vt->numnodes-1].size * SIZE(page));
		/*
		 * Prevent overlap with KVADDR_VMALLOC range.
		 */
		if (vrp[cnt].start > vrp[cnt-1].start)
			vrp[cnt-1].end = vrp[cnt].start;
		cnt++;
	}

	qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr);

	return cnt;
}

/* Generic abstraction to translate user or kernel virtual
 * addresses to physical using a 4 level page table.
*/ static int ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_upper; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); if (!(pud_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); 
} return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 3 level page table. */ static int ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if 
(verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } #include "netdump.h" #include "xen_dom0.h" /* * Determine the relocatable physical address base. */ static void ia64_calc_phys_start(void) { FILE *iomem; int i, found, errflag; char buf[BUFSIZE]; char *p1; ulong kernel_code_start; struct vmcore_data *vd; ulong phys_start, text_start; Elf64_Phdr *phdr = NULL; /* * Default to 64MB. */ machdep->machspec->phys_start = DEFAULT_PHYS_START; text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR; if (ACTIVE()) { if ((iomem = fopen("/proc/iomem", "r")) == NULL) return; errflag = 1; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, ": Kernel code")) { clean_line(buf); errflag = 0; break; } } fclose(iomem); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; else *p1 = NULLCHAR; errflag = 0; kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) return; machdep->machspec->phys_start = kernel_code_start; if (CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); fprintf(fp, "phys_start: %lx\n\n", machdep->machspec->phys_start); } return; } /* * Get relocation value from whatever dumpfile format is being used. */ if (DISKDUMP_DUMPFILE()) { if (diskdump_phys_base(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "compressed kdump: phys_start: %lx\n", phys_start); } return; } else if (LKCD_DUMPFILE()) { if (lkcd_get_kernel_start(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "LKCD dump: phys_start: %lx\n", phys_start); } } if ((vd = get_kdump_vmcore_data())) { /* * There should be at most one region 5 region, and it * should be equal to "_text". If not, take whatever * region 5 address comes first and hope for the best. 
*/ for (i = found = 0; i < vd->num_pt_load_segments; i++) { phdr = vd->load64 + i; if (phdr->p_vaddr == text_start) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } for (i = 0; !found && (i < vd->num_pt_load_segments); i++) { phdr = vd->load64 + i; if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } if (found && CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "p_vaddr: %lx p_paddr: %lx\n", phdr->p_vaddr, phdr->p_paddr); } return; } } /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { /* * Temporarily read physical (machine) addresses from vmcore. */ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) { fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); } if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc p2m_frame_list"); if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), "xen kdump p2m mfn page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn page\n"); xkd->p2m_frames = PAGESIZE()/sizeof(ulong); pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; } physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo) { ulong pgd_idx, pte_idx; ulong pmd, pte; physaddr_t paddr; /* * Temporarily read physical (machine) addresses from vmcore. 
*/ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); xkd->accesses += 2; pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1); pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK; if (!pmd) { paddr = P2M_FAILURE; goto out; } pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); if (pmd != xkd->last_pmd_read) { if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_pmd_read = pmd; } else { pte = xkd->last_mfn_read; xkd->cache_hits++; } pte = pte & _PFN_MASK; if (!pte) { paddr = P2M_FAILURE; goto out; } if (pte != xkd->last_mfn_read) { if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_mfn_read = pte; } else xkd->cache_hits++; pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1); paddr = *(((ulong *)xkd->page) + pte_idx); if (!(paddr & _PAGE_P)) { paddr = P2M_FAILURE; goto out; } paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo); out: pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return paddr; } #include "xendump.h" /* * Create an index of mfns for each page that makes up the * kernel's complete phys_to_machine_mapping[max_pfn] array. 
*/
static int
ia64_xendump_p2m_create(struct xendump_data *xd)
{
	/* No p2m array in this kernel: flag it and report success. */
	if (!symbol_exists("phys_to_machine_mapping")) {
		xd->flags |= XC_CORE_NO_P2M;
		return TRUE;
	}

	/* Not implemented yet for ia64 xendumps with a p2m array. */
	error(FATAL, "ia64_xendump_p2m_create: TBD\n");

	/* dummy calls for clean "make [wW]arn" */
	ia64_debug_dump_page(NULL, NULL, NULL);
	ia64_xendump_load_page(0, xd);
	ia64_xendump_page_index(0, xd);
	ia64_xendump_panic_task(xd);			/* externally called */
	ia64_get_xendump_regs(xd, NULL, NULL, NULL);	/* externally called */

	return FALSE;
}

/*
 * Debug helper: hex-dump a page buffer to ofp, two 8-byte words per
 * output line, preceded by its NAME.  NOTE(review): the loop always
 * prints 1024 pairs (16KB) regardless of the configured page size --
 * presumably only used with 16KB pages; confirm before reuse.
 */
static void
ia64_debug_dump_page(FILE *ofp, char *page, char *name)
{
	int i;
	ulong *up;

	fprintf(ofp, "%s\n", name);

	up = (ulong *)page;
	for (i = 0; i < 1024; i++) {
		fprintf(ofp, "%016lx: %016lx %016lx\n",
			(ulong)((i * 2) * sizeof(ulong)),
			*up, *(up+1));
		up += 2;
	}
}

/*
 * Find the page associated with the kvaddr, and read its contents
 * into the passed-in buffer.  (Unimplemented placeholder.)
 */
static char *
ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd)
{
	error(FATAL, "ia64_xendump_load_page: TBD\n");

	return NULL;
}

/*
 * Find the dumpfile page index associated with the kvaddr.
*/ static int ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) { error(FATAL, "ia64_xendump_page_index: TBD\n"); return 0; } static ulong ia64_xendump_panic_task(struct xendump_data *xd) { if (CRASHDEBUG(1)) error(INFO, "ia64_xendump_panic_task: TBD\n"); return NO_TASK; } static void ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) { machdep->get_stack_frame(bt, rip, rsp); if (is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) && STREQ(closest_symbol(*rip), "schedule")) error(INFO, "xendump: switch_stack possibly not saved -- try \"bt -t\"\n"); } /* for XEN Hypervisor analysis */ static int ia64_is_kvaddr_hyper(ulong addr) { return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); } static int ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong virt_percpu_start, phys_percpu_start; ulong addr, dirp, entry; if (!IS_KVADDR(kvaddr)) return FALSE; if (PERCPU_VIRT_ADDR(kvaddr)) { virt_percpu_start = symbol_value("__phys_per_cpu_start"); phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START; *paddr = kvaddr - PERCPU_ADDR + phys_percpu_start; return TRUE; } else if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) { return FALSE; } /* frametable virtual address */ addr = kvaddr - xhmachdep->frame_table; dirp = symbol_value("frametable_pg_dir"); dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong); readmem(dirp, KVADDR, &entry, sizeof(ulong), "frametable_pg_dir", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, sizeof(ulong), "frametable pmd", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, 
sizeof(ulong), "frametable pte", FAULT_ON_ERROR); if (!(entry & _PAGE_P)) return FALSE; *paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1)); return TRUE; } static void ia64_post_init_hyper(void) { struct machine_specific *ms; ulong frame_table; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (symbol_exists("frame_table")) { frame_table = symbol_value("frame_table"); readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong), "frame_table virtual address", FAULT_ON_ERROR); } else { error(FATAL, "cannot find frame_table virtual address."); } } int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt) { int plen, i; ulong paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct xen_hyper_vcpu_context *vcc; vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); if (!vcc) return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < xht->pcpus)) return 0; if (!machdep->kvtop(NULL, addr, &paddr, 0)) return 0; __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * plen); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * plen, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if 
(CRASHDEBUG(1)) { for (i = 0; i < plen; i++) { fprintf(fp, "__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[vcc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return 1; else return 0; } static void ia64_init_hyper(int when) { struct syment *sp; switch (when) { case SETUP_ENV: #if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); #endif #if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); #endif break; case PRE_SYMTAB: machdep->verify_symbol = ia64_verify_symbol; machdep->machspec = &ia64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); switch (machdep->pagesize) { case 4096: machdep->stacksize = (power(2, 3) * PAGESIZE()); break; case 8192: machdep->stacksize = (power(2, 2) * PAGESIZE()); break; case 16384: machdep->stacksize = (power(2, 1) * PAGESIZE()); break; case 65536: machdep->stacksize = (power(2, 0) * PAGESIZE()); break; default: machdep->stacksize = 32*1024; break; } if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = ia64_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->machspec->phys_start = UNKNOWN_PHYS_START; /* ODA: if need make hyper version if 
(machdep->cmdline_args[0]) parse_cmdline_args(); */ break; case PRE_GDB: if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->kvbase = HYPERVISOR_VIRT_START; machdep->identity_map_base = HYPERVISOR_VIRT_START; machdep->is_kvaddr = ia64_is_kvaddr_hyper; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ia64_eframe_search; machdep->back_trace = ia64_back_trace_cmd; machdep->processor_speed = xen_hyper_ia64_processor_speed; machdep->uvtop = ia64_uvtop; machdep->kvtop = ia64_kvtop_hyper; machdep->get_stack_frame = ia64_get_stack_frame; machdep->get_stackbase = ia64_get_stackbase; machdep->get_stacktop = ia64_get_stacktop; machdep->translate_pte = ia64_translate_pte; machdep->memory_size = xen_hyper_ia64_memory_size; machdep->dis_filter = ia64_dis_filter; machdep->cmd_mach = ia64_cmd_mach; machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus; machdep->line_number_hooks = ia64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; if ((sp = symbol_search("_stext"))) { machdep->machspec->kernel_region = VADDR_REGION(sp->value); machdep->machspec->kernel_start = sp->value; } else { // machdep->machspec->kernel_region = KERNEL_CACHED_REGION; // machdep->machspec->kernel_start = KERNEL_CACHED_BASE; } /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init; break; case POST_GDB: STRUCT_SIZE_INIT(switch_stack, "switch_stack"); MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, "switch_stack", "ar_bspstore"); MEMBER_OFFSET_INIT(switch_stack_ar_pfs, "switch_stack", "ar_pfs"); MEMBER_OFFSET_INIT(switch_stack_ar_rnat, "switch_stack", "ar_rnat"); MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr"); XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); 
XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor"); if (symbol_exists("per_cpu__cpu_info")) { xht->cpu_data_address = symbol_value("per_cpu__cpu_info"); } /* kakuma Can this be calculated? */ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: ia64_post_init_hyper(); break; } } #endif crash-7.1.4/xen_hyper_defs.h0000664000000000000000000007337012634305150014507 0ustar rootroot/* * xen_hyper_defs.h * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifdef XEN_HYPERVISOR_ARCH #include #include #ifdef X86 /* Xen Hypervisor address space layout */ #define IOREMAP_VIRT_END (0UL) #define IOREMAP_VIRT_START (0xFFC00000UL) #define DIRECTMAP_VIRT_END IOREMAP_VIRT_START #define DIRECTMAP_VIRT_START (0xFF000000UL) #define MAPCACHE_VIRT_END DIRECTMAP_VIRT_START #define MAPCACHE_VIRT_START (0xFFC00000UL) #define PERDOMAIN_VIRT_END DIRECTMAP_VIRT_START #define PERDOMAIN_VIRT_START (0xFE800000UL) #define SH_LINEAR_PT_VIRT_END PERDOMAIN_VIRT_START #define SH_LINEAR_PT_VIRT_START (0xFE400000UL) #define SH_LINEAR_PT_VIRT_START_PAE (0xFE000000UL) #define LINEAR_PT_VIRT_END SH_LINEAR_PT_VIRT_START #define LINEAR_PT_VIRT_START (0xFE000000UL) #define LINEAR_PT_VIRT_START_PAE (0xFD800000UL) #define RDWR_MPT_VIRT_END LINEAR_PT_VIRT_START #define RDWR_MPT_VIRT_START (0xFDC00000UL) #define RDWR_MPT_VIRT_START_PAE (0xFC800000UL) #define FRAMETABLE_VIRT_END RDWR_MPT_VIRT_START #define FRAMETABLE_VIRT_START (0xFC400000UL) #define FRAMETABLE_VIRT_START_PAE (0xF6800000UL) #define RO_MPT_VIRT_END FRAMETABLE_VIRT_START #define RO_MPT_VIRT_START (0xFC000000UL) #define RO_MPT_VIRT_START_PAE (0xF5800000UL) #define HYPERVISOR_VIRT_START RO_MPT_VIRT_START #define HYPERVISOR_VIRT_START_PAE RO_MPT_VIRT_START_PAE #endif #ifdef X86_64 #define HYPERVISOR_VIRT_START (0xffff800000000000) #define HYPERVISOR_VIRT_END (0xffff880000000000) #define DIRECTMAP_VIRT_START (0xffff830000000000) #define DIRECTMAP_VIRT_END (0xffff840000000000) #define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START #define XEN_VIRT_START (xht->xen_virt_start) #define XEN_VIRT_ADDR(vaddr) \ (((vaddr) >= XEN_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_START)) #endif #ifdef IA64 #define HYPERVISOR_VIRT_START (0xe800000000000000) #define HYPERVISOR_VIRT_END (0xf800000000000000) #define DEFAULT_SHAREDINFO_ADDR (0xf100000000000000) #define PERCPU_PAGE_SIZE 65536 #define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE) #define DIRECTMAP_VIRT_START (0xf000000000000000) #define 
DIRECTMAP_VIRT_END PERCPU_ADDR #define VIRT_FRAME_TABLE_SIZE (0x0100000000000000) #define PERCPU_VIRT_ADDR(vaddr) \ (((vaddr) >= PERCPU_ADDR) && ((vaddr) < PERCPU_ADDR + PERCPU_PAGE_SIZE)) #define FRAME_TABLE_VIRT_ADDR(vaddr) \ ((vaddr) >= xhmachdep->frame_table && (vaddr) < xhmachdep->frame_table + VIRT_FRAME_TABLE_SIZE) #undef IA64_RBS_OFFSET #define IA64_RBS_OFFSET ((XEN_HYPER_SIZE(vcpu) + 15) & ~15) #endif /* IA64 */ #define DIRECTMAP_VIRT_ADDR(vaddr) \ (((vaddr) >= DIRECTMAP_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_END)) typedef uint16_t domid_t; typedef uint32_t Elf_Word; /* * NOTE kakuma: The following defines are temporary version for * elf note format which is used only in crash. */ #define XEN_HYPER_ELF_NOTE_V1 1 #define XEN_HYPER_ELF_NOTE_V2 2 #define XEN_HYPER_ELF_NOTE_V3 3 #define XEN_HYPER_ELF_NOTE_V4 4 #ifdef X86 #define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x100 #endif #if defined(X86_64) || defined(IA64) #define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x200 #endif /* * Xen Hyper */ #define XEN_HYPER_SMP (0x400) #ifdef X86 #define XEN_HYPER_MAX_VIRT_CPUS (32) #define XEN_HYPER_HZ 100 #endif #ifdef X86_64 #define XEN_HYPER_MAX_VIRT_CPUS (32) #define XEN_HYPER_HZ 100 #endif #ifdef IA64 #define XEN_HYPER_MAX_VIRT_CPUS (64) #define XEN_HYPER_HZ 100 #endif #ifndef XEN_HYPER_MAX_VIRT_CPUS #define XEN_HYPER_MAX_VIRT_CPUS (1) #endif #if defined(X86) || defined(X86_64) #define xen_hyper_per_cpu(var, cpu) \ ({ ulong __var_addr; \ if (xht->__per_cpu_offset) \ __var_addr = (xht->flags & XEN_HYPER_SMP) ? \ ((ulong)(var) + xht->__per_cpu_offset[cpu]) : (ulong)(var); \ else \ __var_addr = (ulong)(var) + ((ulong)(cpu) << xht->percpu_shift); \ __var_addr; }) #elif defined(IA64) #define xen_hyper_per_cpu(var, cpu) \ ((xht->flags & XEN_HYPER_SMP) ? 
\ (ulong)(var) + (xht->__per_cpu_offset[cpu]) : \ (ulong)(var)) #endif #if defined(X86) || defined(X86_64) #define XEN_HYPER_STACK_ORDER 2 #if 0 #define XEN_HYPER_STACK_SIZE (machdep->pagesize << XEN_HYPER_STACK_ORDER) #endif #define XEN_HYPER_GET_CPU_INFO(sp) \ ((sp & ~(STACKSIZE()-1)) | \ (STACKSIZE() - XEN_HYPER_SIZE(cpu_info))) #endif #define XEN_HYPER_CONRING_SIZE 16384 /* system time */ #define XEN_HYPER_NANO_TO_SEC(ns) ((ulonglong)((ns) / 1000000000ULL)) #define XEN_HYPER_MICR_TO_SEC(us) ((ulonglong)((us) / 1000000ULL)) #define XEN_HYPER_MILI_TO_SEC(ms) ((ulonglong)((ms) / 1000ULL)) /* * Domain */ /* Prepared domain ID. */ #define XEN_HYPER_DOMID_IO (0x7FF1U) #define XEN_HYPER_DOMID_XEN (0x7FF2U) #define XEN_HYPER_DOMID_IDLE (0x7FFFU) /* Domain flags (domain_flags). */ /* Is this domain privileged? */ #define XEN_HYPER__DOMF_privileged 0 #define XEN_HYPER_DOMF_privileged (1UL<= 0) #define XEN_HYPER_VALID_STRUCT(X) (xen_hyper_size_table.X >= 0) #define XEN_HYPER_VALID_MEMBER(X) (xen_hyper_offset_table.X >= 0) #define XEN_HYPER_ASSIGN_SIZE(X) (xen_hyper_size_table.X) #define XEN_HYPER_ASSIGN_OFFSET(X) (xen_hyper_offset_table.X) #define XEN_HYPER_STRUCT_SIZE_INIT(X, Y) (XEN_HYPER_ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) #define XEN_HYPER_MEMBER_SIZE_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) #define XEN_HYPER_MEMBER_OFFSET_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) /* * System */ #define XEN_HYPER_MAX_CPUS() (xht->max_cpus) #define XEN_HYPER_CRASHING_CPU() (xht->crashing_cpu) /* * Dump information */ #define XEN_HYPER_X86_NOTE_EIP(regs) (regs[12]) #define XEN_HYPER_X86_NOTE_ESP(regs) (regs[15]) #define XEN_HYPER_X86_64_NOTE_RIP(regs) (regs[16]) #define XEN_HYPER_X86_64_NOTE_RSP(regs) (regs[19]) /* * Domain */ #define XEN_HYPER_DOMAIN_F_INIT 0x1 #define XEN_HYPER_NR_DOMAINS() (xht->domains) #define XEN_HYPER_RUNNING_DOMAINS() (xhdt->running_domains) /* * Phisycal CPU */ #define XEN_HYPER_NR_PCPUS() (xht->pcpus) #define 
for_cpu_indexes(i, cpuid) \ for (i = 0, cpuid = xht->cpu_idxs[i]; \ i < XEN_HYPER_NR_PCPUS(); \ cpuid = xht->cpu_idxs[++i]) #define XEN_HYPER_CURR_VCPU(pcpuid) \ (xen_hyper_get_active_vcpu_from_pcpuid(pcpuid)) /* * VCPU */ #define XEN_HYPER_VCPU_F_INIT 0x1 #define XEN_HYPER_NR_VCPUS_IN_DOM(domain_context) (domain_context->vcpu_cnt) #define XEN_HYPER_VCPU_LAST_CONTEXT() (xhvct->last) /* * tools */ #define XEN_HYPER_PRI(fp, len, str, buf, flag, args) \ sprintf args; \ xen_hyper_fpr_indent(fp, len, str, buf, flag); #define XEN_HYPER_PRI_CONST(fp, len, str, flag) \ xen_hyper_fpr_indent(fp, len, str, NULL, flag); #define XEN_HYPER_PRI_L (0x0) #define XEN_HYPER_PRI_R (0x1) #define XEN_HYPER_PRI_LF (0x2) /* * Global data */ extern struct xen_hyper_machdep_table *xhmachdep; extern struct xen_hyper_table *xht; extern struct xen_hyper_dumpinfo_table *xhdit; extern struct xen_hyper_domain_table *xhdt; extern struct xen_hyper_vcpu_table *xhvct; extern struct xen_hyper_pcpu_table *xhpct; extern struct xen_hyper_sched_table *xhscht; extern struct xen_hyper_symbol_table_data *xhsymt; extern struct xen_hyper_offset_table xen_hyper_offset_table; extern struct xen_hyper_size_table xen_hyper_size_table; extern struct command_table_entry xen_hyper_command_table[]; extern struct task_context fake_tc; /* * Xen Hyper command help */ extern char *xen_hyper_help_domain[]; extern char *xen_hyper_help_doms[]; extern char *xen_hyper_help_dumpinfo[]; extern char *xen_hyper_help_log[]; extern char *xen_hyper_help_pcpus[]; extern char *xen_hyper_help_sched[]; extern char *xen_hyper_help_sys[]; extern char *xen_hyper_help_vcpu[]; extern char *xen_hyper_help_vcpus[]; /* * Prototype */ ulonglong xen_hyper_get_uptime_hyper(void); /* * x86 */ int xen_hyper_x86_get_smp_cpus(void); uint64_t xen_hyper_x86_memory_size(void); /* * IA64 */ int xen_hyper_ia64_get_smp_cpus(void); uint64_t xen_hyper_ia64_memory_size(void); ulong xen_hyper_ia64_processor_speed(void); /* * Xen Hyper */ void 
xen_hyper_init(void); void xen_hyper_domain_init(void); void xen_hyper_vcpu_init(void); void xen_hyper_dumpinfo_init(void); void xen_hyper_misc_init(void); void xen_hyper_post_init(void); struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id); struct xen_hyper_dumpinfo_context *xen_hyper_note_to_dumpinfo_context(ulong note); char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type); /* domain */ void xen_hyper_refresh_domain_context_space(void); int xen_hyper_get_domains(void); char *xen_hyper_get_domain_next(int mod, ulong *next); domid_t xen_hyper_domain_to_id(ulong domain); char *xen_hyper_id_to_domain_struct(domid_t id); struct xen_hyper_domain_context * xen_hyper_domain_to_domain_context(ulong domain); struct xen_hyper_domain_context * xen_hyper_id_to_domain_context(domid_t id); struct xen_hyper_domain_context * xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, ulong domain, char *dp); char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc); char *xen_hyper_read_domain(ulong domain); char *xen_hyper_read_domain_verify(ulong domain); char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct); void xen_hyper_alloc_domain_context_space(int domains); ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc); /* vcpu */ void xen_hyper_refresh_vcpu_context_space(void); struct xen_hyper_vcpu_context * xen_hyper_vcpu_to_vcpu_context(ulong vcpu); struct xen_hyper_vcpu_context * xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid); struct xen_hyper_vcpu_context_array * xen_hyper_domain_to_vcpu_context_array(ulong domain); struct xen_hyper_vcpu_context_array * xen_hyper_domid_to_vcpu_context_array(domid_t id); struct xen_hyper_vcpu_context * xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc, ulong vcpu, char *vcp); char * xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc); char *xen_hyper_read_vcpu(ulong vcpu); char 
*xen_hyper_read_vcpu_verify(ulong vcpu); char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct); void xen_hyper_alloc_vcpu_context_arrays_space(int domains); void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus); int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc); /* pcpu */ #if defined(X86) || defined(X86_64) void xen_hyper_x86_pcpu_init(void); #elif defined(IA64) void xen_hyper_ia64_pcpu_init(void); #endif struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id); struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu); struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, ulong pcpu, char *pcp); struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc, ulong init_tss, char *tss); char *xen_hyper_read_pcpu(ulong pcpu); char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct); void xen_hyper_alloc_pcpu_context_space(int pcpus); /* others */ char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86); char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64); int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc); void xen_hyper_print_bt_header(FILE *out, ulong pcpu, int newline); ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpu); ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu); void xen_hyper_get_cpu_info(void); int xen_hyper_test_pcpu_id(uint pcpu_id); /* * Xen Hyper command */ void xen_hyper_cmd_help(void); void xen_hyper_cmd_domain(void); void xen_hyper_cmd_doms(void); void xen_hyper_cmd_dumpinfo(void); void xen_hyper_cmd_log(void); void xen_hyper_dump_log(void); void xen_hyper_cmd_pcpus(void); void xen_hyper_cmd_sched(void); void xen_hyper_cmd_sys(void); void xen_hyper_cmd_vcpu(void); void xen_hyper_cmd_vcpus(void); void xen_hyper_display_sys_stats(void); void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc); char 
*xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc, char *buf, int verbose); char *xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc, char *buf, int verbose); /* tools */ void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag); #else #define XEN_HYPERVISOR_NOT_SUPPORTED \ "Xen hypervisor mode not supported on this architecture\n" #endif crash-7.1.4/extensions.c0000775000000000000000000003202312634305150013670 0ustar rootroot/* extensions.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include static int in_extensions_library(char *, char *); static char *get_extensions_directory(char *); #define DUMP_EXTENSIONS (0) #define LOAD_EXTENSION (1) #define UNLOAD_EXTENSION (2) /* * Load, unload, or list the extension libaries. 
*/
void
cmd_extend(void)
{
	int c;
	int flag;

	flag = DUMP_EXTENSIONS;

	/* -l (load) and -u (unload) are mutually exclusive. */
	while ((c = getopt(argcnt, args, "lu")) != EOF) {
		switch(c)
		{
		case 'l':
			if (flag & UNLOAD_EXTENSION) {
				error(INFO,
				    "-l and -u are mutually exclusive\n");
				argerrs++;
			} else
				flag |= LOAD_EXTENSION;
			break;

		case 'u':
			if (flag & LOAD_EXTENSION) {
				error(INFO,
				    "-u and -l are mutually exclusive\n");
				argerrs++;
			} else
				flag |= UNLOAD_EXTENSION;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	switch (flag)
	{
	case DUMP_EXTENSIONS:
		/* No options and no arguments: just list what is loaded. */
		if (!args[optind]) {
			dump_extension_table(!VERBOSE);
			return;
		}
		/* FALLTHROUGH */
	case LOAD_EXTENSION:
		if (!args[optind]) {
			error(INFO,
			    "-l requires one or more extension library arguments\n");
			cmd_usage(pc->curcmd, SYNOPSIS);
			break;
		}
		while (args[optind]) {
			load_extension(args[optind]);
			optind++;
		}
		break;

	case UNLOAD_EXTENSION:
		/* -u with no arguments unloads every loaded extension. */
		if (!args[optind]) {
			unload_extension(NULL);
			break;
		}
		while (args[optind]) {
			unload_extension(args[optind]);
			optind++;
		}
		break;
	}
}

/*
 * List all extension libraries and their commands in either the extend
 * command format or for "help -e" (verbose).
 */
void
dump_extension_table(int verbose)
{
	int i;
	struct extension_table *ext;
	struct command_table_entry *cp;
	char buf[BUFSIZE];
	int longest, others;

	if (!extension_table)
		return;

	/*
	 * Verbose ("help -e"): dump every field of each extension_table
	 * entry and of each command it registered.
	 */
	if (verbose) {
		for (ext = extension_table; ext; ext = ext->next) {
			fprintf(fp, " filename: %s\n", ext->filename);
			fprintf(fp, " handle: %lx\n", (ulong)ext->handle);
			fprintf(fp, " flags: %lx (", ext->flags);
			others = 0;
			if (ext->flags & REGISTERED)
				fprintf(fp, "%sREGISTERED", others++ ?
					"|" : "");
			fprintf(fp, ")\n");
			fprintf(fp, " next: %lx\n", (ulong)ext->next);
			fprintf(fp, " prev: %lx\n", (ulong)ext->prev);

			for (i = 0, cp = ext->command_table; cp->name;
			     cp++, i++) {
				fprintf(fp, "command_table[%d]: %lx\n",
					i, (ulong)cp);
				fprintf(fp, " name: %s\n", cp->name);
				fprintf(fp, " func: %lx\n", (ulong)cp->func);
				fprintf(fp, " help_data: %lx\n",
					(ulong)cp->help_data);
				fprintf(fp, " flags: %lx (", cp->flags);
				others = 0;
				if (cp->flags & CLEANUP)
					fprintf(fp, "%sCLEANUP",
						others++ ? "|" : "");
				if (cp->flags & REFRESH_TASK_TABLE)
					fprintf(fp, "%sREFRESH_TASK_TABLE",
						others++ ? "|" : "");
				if (cp->flags & HIDDEN_COMMAND)
					fprintf(fp, "%sHIDDEN_COMMAND",
						others++ ? "|" : "");
				fprintf(fp, ")\n");
			}

			if (ext->next)
				fprintf(fp, "\n");
		}
		return;
	}

	/*
	 * Print them out in the order they were loaded.  New entries are
	 * inserted at the head of the list, so walk to the tail first and
	 * then traverse backwards via the prev pointers.
	 */
	for (longest = 0, ext = extension_table; ext; ext = ext->next) {
		if (strlen(ext->filename) > longest)
			longest = strlen(ext->filename);
	}

	fprintf(fp, "%s COMMANDS\n",
		mkstring(buf, longest, LJUST, "SHARED OBJECT"));

	longest = MAX(longest, strlen("SHARED OBJECT"));

	for (ext = extension_table; ext; ext = ext->next)
		if (ext->next == NULL)
			break;

	do {
		fprintf(fp, "%s ",
			mkstring(buf, longest, LJUST, ext->filename));
		for (cp = ext->command_table; cp->name; cp++)
			fprintf(fp, "%s ", cp->name);
		fprintf(fp, "\n");
	} while ((ext = ext->prev));
}

/*
 * Load an extension library.
*/
void
load_extension(char *lib)
{
	struct extension_table *ext, *curext;
	char buf[BUFSIZE];
	size_t size;
	char *env;
	int env_len;

	if ((env = getenv("CRASH_EXTENSIONS")))
		env_len = strlen(env)+1;
	else
		env_len = 0;

	/*
	 * One allocation holds the extension_table structure plus enough
	 * trailing space for the resolved filename, which may gain a
	 * directory prefix below.
	 */
	size = sizeof(struct extension_table) + strlen(lib) +
		MAX(env_len, strlen("/usr/lib64/crash/extensions/")) + 1;

	if ((ext = (struct extension_table *)malloc(size)) == NULL)
		error(FATAL, "cannot malloc extension_table space.");
	BZERO(ext, size);
	ext->filename = (char *)((ulong)ext + sizeof(struct extension_table));

	/*
	 * If the library is not specified by an absolute pathname, dlopen()
	 * does not look in the current directory, so modify the filename.
	 * If it's not in the current directory, check the extensions library
	 * directory.
	 */
	if ((*lib != '.') && (*lib != '/')) {
		if (file_exists(lib, NULL))
			sprintf(ext->filename, "./%s", lib);
		else if (in_extensions_library(lib, buf))
			strcpy(ext->filename, buf);
		else {
			error(INFO, "%s: %s\n", lib, strerror(ENXIO));
			free(ext);
			return;
		}
	} else
		strcpy(ext->filename, lib);

	if (!is_shared_object(ext->filename)) {
		error(INFO, "%s: not an ELF format object file\n",
			ext->filename);
		free(ext);
		return;
	}

	/* Refuse to load the same shared object twice. */
	for (curext = extension_table; curext; curext = curext->next) {
		if (same_file(curext->filename, ext->filename)) {
			fprintf(fp, "%s: shared object already loaded\n",
				ext->filename);
			free(ext);
			return;
		}
	}

	/*
	 * register_extension() will be called by the shared object's
	 * _init() function before dlopen() returns below.
	 */
	pc->curext = ext;
	ext->handle = dlopen(ext->filename, RTLD_NOW|RTLD_GLOBAL);

	if (!ext->handle) {
		strcpy(buf, dlerror());
		error(INFO, "%s\n", buf);
		if (strstr(buf, "undefined symbol: register_extension")) {
			error(INFO, "%s may be statically linked: ",
				pc->program_name);
			fprintf(fp, "recompile without the -static flag\n");
		}
		free(ext);
		return;
	}

	/*
	 * REGISTERED is set by register_extension(); if it was not set the
	 * library registered nothing usable, so unload it again.
	 */
	if (!(ext->flags & REGISTERED)) {
		dlclose(ext->handle);
		if (ext->flags & (DUPLICATE_COMMAND_NAME | NO_MINIMAL_COMMANDS))
			error(INFO, "%s: shared object unloaded\n",
				ext->filename);
		else
			error(INFO,
			    "%s: no commands registered: shared object unloaded\n",
				ext->filename);
		free(ext);
		return;
	}

	fprintf(fp, "%s: shared object loaded\n", ext->filename);

	/*
	 * Put new libraries at the head of the list.
	 */
	if (extension_table) {
		extension_table->prev = ext;
		ext->next = extension_table;
	}
	extension_table = ext;

	help_init();
}

/*
 * Check the extensions library directories.
 */
static int
in_extensions_library(char *lib, char *buf)
{
	char *env;

	/* $CRASH_EXTENSIONS takes precedence over the built-in locations. */
	if ((env = getenv("CRASH_EXTENSIONS"))) {
		sprintf(buf, "%s%s%s", env,
			LASTCHAR(env) == '/' ? "" : "/", lib);
		if (file_exists(buf, NULL))
			return TRUE;
	}

	if (BITS64()) {
		sprintf(buf, "/usr/lib64/crash/extensions/%s", lib);
		if (file_exists(buf, NULL))
			return TRUE;
	}

	sprintf(buf, "/usr/lib/crash/extensions/%s", lib);
	if (file_exists(buf, NULL))
		return TRUE;

	sprintf(buf, "./extensions/%s", lib);
	if (file_exists(buf, NULL))
		return TRUE;

	return FALSE;
}

/*
 * Look for an extensions directory using the proper order.
*/ static char * get_extensions_directory(char *dirbuf) { char *env; if ((env = getenv("CRASH_EXTENSIONS"))) { if (is_directory(env)) { strcpy(dirbuf, env); return dirbuf; } } if (BITS64()) { sprintf(dirbuf, "/usr/lib64/crash/extensions"); if (is_directory(dirbuf)) return dirbuf; } sprintf(dirbuf, "/usr/lib/crash/extensions"); if (is_directory(dirbuf)) return dirbuf; sprintf(dirbuf, "./extensions"); if (is_directory(dirbuf)) return dirbuf; return NULL; } void preload_extensions(void) { DIR *dirp; struct dirent *dp; char dirbuf[BUFSIZE]; char filename[BUFSIZE]; int found; if (!get_extensions_directory(dirbuf)) return; dirp = opendir(dirbuf); if (!dirp) { error(INFO, "%s: %s\n", dirbuf, strerror(errno)); return; } pc->curcmd = pc->program_name; for (found = 0, dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { sprintf(filename, "%s%s%s", dirbuf, LASTCHAR(dirbuf) == '/' ? "" : "/", dp->d_name); if (!is_shared_object(filename)) continue; found++; load_extension(dp->d_name); } closedir(dirp); if (found) fprintf(fp, "\n"); else error(NOTE, "%s: no extension modules found in directory\n\n", dirbuf); } /* * Unload all, or as specified, extension libraries. 
*/
void
unload_extension(char *lib)
{
	struct extension_table *ext;
	int found;
	char buf[BUFSIZE];

	/* A NULL argument means: unload every extension on the list. */
	if (!lib) {
		while (extension_table) {
			ext = extension_table;
			if (dlclose(ext->handle))
				error(FATAL,
				    "dlclose: %s: shared object not open\n",
					ext->filename);
			fprintf(fp, "%s: shared object unloaded\n",
				ext->filename);
			extension_table = ext->next;
			free(ext);
		}
		help_init();
		return;
	}

	/* Resolve a bare name through the extensions directory search. */
	if ((*lib != '.') && (*lib != '/')) {
		if (!file_exists(lib, NULL) &&
		    in_extensions_library(lib, buf))
			lib = buf;
	}

	if (!file_exists(lib, NULL)) {
		error(INFO, "%s: %s\n", lib, strerror(ENXIO));
		return;
	}

	for (ext = extension_table, found = FALSE; ext; ext = ext->next) {
		if (same_file(lib, ext->filename)) {
			found = TRUE;
			if (dlclose(ext->handle))
				error(INFO,
				    "dlclose: %s: shared object not open\n",
					ext->filename);
			else {
				fprintf(fp, "%s: shared object unloaded\n",
					ext->filename);
				/* Unlink entry from the doubly-linked list. */
				if (extension_table == ext) { /* first */
					extension_table = ext->next;
					if (ext->next)
						ext->next->prev = NULL;
				} else if (ext->next == NULL) /* last */
					ext->prev->next = NULL;
				else { /* middle */
					ext->prev->next = ext->next;
					ext->next->prev = ext->prev;
				}
				free(ext);
				help_init();
				break;
			}
		/*
		 * NOTE(review): two basename() calls in one expression rely
		 * on the GNU (non-modifying) variant; the POSIX variant may
		 * return a static buffer -- confirm on non-glibc builds.
		 */
		} else if (STREQ(basename(lib), basename(ext->filename))) {
			error(INFO, "%s and %s are different object files\n",
				lib, ext->filename);
			found = TRUE;
		}
	}

	if (!found)
		error(INFO, "%s: not loaded\n", lib);
}

/*
 * Register the command_table as long as there are no command namespace
 * clashes with the currently-existing command set.  Also delete any aliases
 * that clash, giving the registered command name priority.
 *
 * This function is called from the shared object's _init() function
 * before the dlopen() call returns back to load_extension() above.
 * The mark of approval for load_extension() is the setting of the
 * REGISTERED bit in the "current" extension_table structure flags.
*/
void
register_extension(struct command_table_entry *command_table)
{
	struct command_table_entry *cp;

	/* Assume no minimal-mode support until a MINIMAL command is seen. */
	pc->curext->flags |= NO_MINIMAL_COMMANDS;

	for (cp = command_table; cp->name; cp++) {
		if (get_command_table_entry(cp->name)) {
			error(INFO,
			    "%s: \"%s\" is a duplicate of a currently-existing command\n",
				pc->curext->filename, cp->name);
			pc->curext->flags |= DUPLICATE_COMMAND_NAME;
			return;
		}
		if (cp->flags & MINIMAL)
			pc->curext->flags &= ~NO_MINIMAL_COMMANDS;
	}

	if ((pc->flags & MINIMAL_MODE) &&
	    (pc->curext->flags & NO_MINIMAL_COMMANDS)) {
		error(INFO,
		    "%s: does not contain any commands which support minimal mode\n",
			pc->curext->filename);
		return;
	}

	if (pc->flags & MINIMAL_MODE) {
		for (cp = command_table; cp->name; cp++) {
			if (!(cp->flags & MINIMAL)) {
				error(WARNING,
				    "%s: command \"%s\" does not support minimal mode\n",
					pc->curext->filename, cp->name);
			}
		}
	}

	/* Extension command names take priority over existing aliases. */
	for (cp = command_table; cp->name; cp++) {
		if (is_alias(cp->name)) {
			error(INFO,
			    "alias \"%s\" deleted: name clash with extension command\n",
				cp->name);
			deallocate_alias(cp->name);
		}
	}

	pc->curext->command_table = command_table;
	pc->curext->flags |= REGISTERED; /* Mark of approval */
}

/*
 * Hooks for sial.
 */
unsigned long
get_curtask(void)
{
	return CURRENT_TASK();
}

char *
crash_global_cmd(void)
{
	return pc->curcmd;
}

struct command_table_entry *
crash_cmd_table(void)
{
	return pc->cmd_table;
}
crash-7.1.4/ppc.c0000775000000000000000000016050112634305150012256 0ustar rootroot/* ppc.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2007, 2010-2014 David Anderson
 * Copyright (C) 2002-2007, 2010-2014 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef PPC #include "defs.h" #include #define MAX_PLATFORM_LEN 32 /* length for platform string */ /* * This structure was copied from kernel source * in include/asm-ppc/ptrace.h */ struct ppc_pt_regs { long gpr[32]; long nip; long msr; long orig_gpr3; /* Used for restarting system calls */ long ctr; long link; long xer; long ccr; long mq; /* 601 only (not used at present) */ /* Used on APUS to hold IPL value. */ long trap; /* Reason for being here */ long dar; /* Fault registers */ long dsisr; long result; /* Result of a system call */ }; static int ppc_kvtop(struct task_context *, ulong, physaddr_t *, int); static int ppc_uvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ppc_vmalloc_start(void); static int ppc_is_task_addr(ulong); static int ppc_verify_symbol(const char *, ulong, char); static ulong ppc_get_task_pgd(ulong); static int ppc_translate_pte(ulong, void *, ulonglong); static ulong ppc_processor_speed(void); static int ppc_eframe_search(struct bt_info *); static ulong ppc_in_irqstack(ulong); static void ppc_back_trace_cmd(struct bt_info *); static void ppc_back_trace(struct gnu_request *, struct bt_info *); static void get_ppc_frame(struct bt_info *, ulong *, ulong *); static void ppc_print_stack_entry(int,struct gnu_request *, ulong, ulong, struct bt_info *); static char *ppc_check_eframe(struct ppc_pt_regs *); static void ppc_print_eframe(char *, struct ppc_pt_regs *, struct bt_info *); static void ppc_print_regs(struct ppc_pt_regs *); static void ppc_display_full_frame(struct bt_info *, ulong, FILE *); static void ppc_dump_irq(int); static void ppc_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ppc_dis_filter(ulong, char *, unsigned int); static void ppc_cmd_mach(void); 
static int ppc_get_smp_cpus(void); static void ppc_display_machine_stats(void); static void ppc_dump_line_number(ulong); static struct line_number_hook ppc_line_number_hooks[]; static struct machine_specific ppc_machine_specific = { 0 }; static int probe_default_platform(char *); static int probe_ppc44x_platform(char *); static int probe_ppce500_platform(char *); static void ppc_probe_base_platform(void); typedef int (*probe_func_t) (char *); probe_func_t probe_platforms[] = { probe_ppc44x_platform, /* 44x chipsets */ probe_ppce500_platform, /* E500 chipsets */ probe_default_platform, /* This should be at the end */ NULL }; /* Don't forget page flags definitions for each platform */ #define PLATFORM_PAGE_FLAGS_SETUP(PLT) \ do { \ _PAGE_PRESENT = PLT##_PAGE_PRESENT; \ _PAGE_USER = PLT##_PAGE_USER; \ _PAGE_RW = PLT##_PAGE_RW; \ _PAGE_GUARDED = PLT##_PAGE_GUARDED; \ _PAGE_COHERENT = PLT##_PAGE_COHERENT; \ _PAGE_NO_CACHE = PLT##_PAGE_NO_CACHE; \ _PAGE_WRITETHRU = PLT##_PAGE_WRITETHRU; \ _PAGE_DIRTY = PLT##_PAGE_DIRTY; \ _PAGE_ACCESSED = PLT##_PAGE_ACCESSED; \ _PAGE_HWWRITE = PLT##_PAGE_HWWRITE; \ _PAGE_SHARED = PLT##_PAGE_SHARED; \ } while (0) static int probe_ppc44x_platform(char *name) { /* 44x include ppc440* and ppc470 */ if (STRNEQ(name, "ppc440") || STREQ(name, "ppc470")) { PPC_PLATFORM = strdup(name); PLATFORM_PAGE_FLAGS_SETUP(PPC44x); return TRUE; } return FALSE; } struct fsl_booke_tlbcam { #define NUM_TLBCAMS (64) #define LAST_TLBCAM (0x40) uint index; struct { ulong start; ulong limit; physaddr_t phys; } tlbcamrange; struct { uint MAS0; uint MAS1; ulong MAS2; uint MAS3; uint MAS7; } tlbcam; }; static int fsl_booke_vtop(ulong vaddr, physaddr_t *paddr, int verbose) { struct fsl_booke_tlbcam *fsl_mmu; int i, found; if (CRASHDEBUG(1)) fprintf(fp, "[Searching tlbcam address mapping]\n"); fsl_mmu = MMU_SPECIAL; for (i = 0, found = FALSE;;i++, fsl_mmu++) { if (vaddr >= fsl_mmu->tlbcamrange.start && vaddr < fsl_mmu->tlbcamrange.limit) { *paddr = 
fsl_mmu->tlbcamrange.phys + (vaddr - fsl_mmu->tlbcamrange.start); found = TRUE; break; } if (fsl_mmu->index & LAST_TLBCAM) break; } if (found && verbose) { /* TLBCAM segment attributes */ fprintf(fp, "\n TLBCAM[%u]: MAS0 MAS1 MAS2 " "MAS3 MAS7\n", (fsl_mmu->index & ~LAST_TLBCAM)); fprintf(fp, " %-8x %-8x %-8lx %-8x %-8x\n", fsl_mmu->tlbcam.MAS0, fsl_mmu->tlbcam.MAS1, fsl_mmu->tlbcam.MAS2, fsl_mmu->tlbcam.MAS3, fsl_mmu->tlbcam.MAS7); /* TLBCAM range */ fprintf(fp, " VIRTUAL RANGE : %lx - %lx\n", fsl_mmu->tlbcamrange.start, fsl_mmu->tlbcamrange.limit); fprintf(fp, " PHYSICAL RANGE: %llx - %llx\n", fsl_mmu->tlbcamrange.phys, fsl_mmu->tlbcamrange.phys + (fsl_mmu->tlbcamrange.limit - fsl_mmu->tlbcamrange.start)); /* translated addr and its tlbcam's offset. */ fprintf(fp, " => VIRTUAL PHYSICAL TLBCAM-OFFSET\n"); fprintf(fp, " %-8lx %-8llx %lu\n", vaddr, *paddr, vaddr - fsl_mmu->tlbcamrange.start); } if (CRASHDEBUG(1)) fprintf(fp, "[tlbcam search end]\n"); return found; } static void fsl_booke_mmu_setup(void) { struct fsl_booke_tlbcam *fsl_mmu; uint i, tlbcam_index; ulong tlbcam_addrs, TLBCAM; readmem(symbol_value("tlbcam_index"), KVADDR, &tlbcam_index, sizeof(uint), "tlbcam_index", FAULT_ON_ERROR); if (tlbcam_index != 0 && tlbcam_index < NUM_TLBCAMS) { fsl_mmu = calloc(tlbcam_index, sizeof(*fsl_mmu)); if (!fsl_mmu) { error(FATAL, "fsl_mmu calloc() failed\n"); return; } tlbcam_addrs = symbol_value("tlbcam_addrs"); TLBCAM = symbol_value("TLBCAM"); for (i = 0; i < tlbcam_index; i++) { fsl_mmu[i].index = i; readmem(tlbcam_addrs + i * sizeof(fsl_mmu[i].tlbcamrange), KVADDR, &fsl_mmu[i].tlbcamrange, sizeof(fsl_mmu[i].tlbcamrange), "tlbcam_addrs", FAULT_ON_ERROR); readmem(TLBCAM + i * sizeof(fsl_mmu[i].tlbcam), KVADDR, &fsl_mmu[i].tlbcam, sizeof(fsl_mmu[i].tlbcam), "TLBCAM", FAULT_ON_ERROR); } fsl_mmu[i - 1].index |= LAST_TLBCAM; MMU_SPECIAL = fsl_mmu; VTOP_SPECIAL = fsl_booke_vtop; } else error(INFO, "[%s]: can't setup tlbcam: tlbcam_index=%u\n", PPC_PLATFORM, tlbcam_index); } 
/*
 * Platform probe for Freescale e500mc-family parts: match the
 * "ppce500mc" platform string, record it in PPC_PLATFORM, and install
 * the matching PTE flag layout (BOOK3E when PAE/64-bit PTEs, otherwise
 * FSL_BOOKE) before building the TLBCAM translation table.
 * Returns TRUE if the name matched, FALSE otherwise.
 */
static int
probe_ppce500_platform(char *name)
{
	if (STRNEQ(name, "ppce500mc")) {
		PPC_PLATFORM = strdup(name);
		if (IS_PAE()) {
			PTE_RPN_SHIFT = BOOKE3E_PTE_RPN_SHIFT;
			PLATFORM_PAGE_FLAGS_SETUP(BOOK3E);
			/* Set special flag for book3e */
			_PAGE_K_RW = BOOK3E_PAGE_KERNEL_RW;
		} else
			PLATFORM_PAGE_FLAGS_SETUP(FSL_BOOKE);
		fsl_booke_mmu_setup();
		return TRUE;
	}

	return FALSE;
}

/*
 * Fallback probe: accept any platform name and install the default
 * page-flag definitions.  With 64-bit PTEs the defaults are known not
 * to cover vmalloc translation, so warn first.  Always returns TRUE,
 * so no later probe in the table runs after this one.
 */
static int
probe_default_platform(char *name)
{
	if (IS_PAE()) {
		error(INFO, "platform \"%s\" 64bit PTE fall through\n", name);
		error(INFO, "vmalloc translation could not work!\n");
	}

	/* Use the default definitions */
	PPC_PLATFORM = strdup(name);
	PLATFORM_PAGE_FLAGS_SETUP(DEFAULT);

	return TRUE;
}

#undef PLATFORM_PAGE_FLAGS_SETUP

/*
 * Find the platform of the crashing system and set the
 * base_platform accordingly.
 */
void
ppc_probe_base_platform(void)
{
	probe_func_t probe;
	char platform_name[MAX_PLATFORM_LEN];
	ulong ptr;
	int i;

	/*
	 * Read the kernel's powerpc_base_platform string pointer and then
	 * the string itself; on any failure fall back to a placeholder so
	 * a probe still runs.
	 */
	if (!try_get_symbol_data("powerpc_base_platform", sizeof(ulong), &ptr) ||
	    read_string(ptr, platform_name, MAX_PLATFORM_LEN - 1) == 0)
		/* Let us fallback to default definitions */
		strcpy(platform_name, "(unknown)");

	/*
	 * First matching probe wins.  NOTE(review): probe_default_platform
	 * above always returns TRUE, so it presumably terminates the
	 * probe_platforms[] table — confirm against the table definition.
	 */
	for (i = 0; probe_platforms[i] != NULL; i++) {
		probe = probe_platforms[i];
		if (probe(platform_name))
			break;
	}
}

/*
 * Do all necessary machine-specific setup here. This is called twice,
 * before and after GDB has been initialized.
*/ void ppc_init(int when) { uint cpu_features; ulong cur_cpu_spec; struct datatype_member pte = { .name = "pte_t", }; switch (when) { case SETUP_ENV: machdep->machspec = &ppc_machine_specific; machdep->process_elf_notes = process_elf32_notes; break; case PRE_SYMTAB: machdep->verify_symbol = ppc_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = PPC_STACK_SIZE; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); machdep->pmd = machdep->pgd; if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; break; case PRE_GDB: machdep->kvbase = symbol_value("_stext"); machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ppc_eframe_search; machdep->back_trace = ppc_back_trace_cmd; machdep->processor_speed = ppc_processor_speed; machdep->uvtop = ppc_uvtop; machdep->kvtop = ppc_kvtop; machdep->get_task_pgd = ppc_get_task_pgd; machdep->get_stack_frame = ppc_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = ppc_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = ppc_is_task_addr; machdep->dis_filter = ppc_dis_filter; machdep->cmd_mach = ppc_cmd_mach; machdep->get_smp_cpus = ppc_get_smp_cpus; machdep->line_number_hooks = ppc_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: /* gdb interface got available, resolve PTE right now. 
*/ PTE_SIZE = DATATYPE_SIZE(&pte); if (PTE_SIZE < 0) error(FATAL, "gdb could not handle \"pte_t\" size request\n"); /* Check if we have 64bit PTE on 32bit system */ if (PTE_SIZE == sizeof(ulonglong)) machdep->flags |= PAE; /* Find the platform where we crashed */ ppc_probe_base_platform(); if (!PTE_RPN_SHIFT) PTE_RPN_SHIFT = PAGE_SHIFT; machdep->vmalloc_start = ppc_vmalloc_start; MEMBER_OFFSET_INIT(thread_struct_pg_tables, "thread_struct", "pg_tables"); if (VALID_SIZE(irq_desc_t)) { /* * Use generic irq handlers for recent kernels whose * irq_desc_t have been initialized in kernel_init(). */ machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; } else { machdep->dump_irq = ppc_dump_irq; STRUCT_SIZE_INIT(irqdesc, "irqdesc"); STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); MEMBER_OFFSET_INIT(irqdesc_action, "irqdesc", "action"); MEMBER_OFFSET_INIT(irqdesc_ctl, "irqdesc", "ctl"); MEMBER_OFFSET_INIT(irqdesc_level, "irqdesc", "level"); } MEMBER_OFFSET_INIT(device_node_type, "device_node", "type"); MEMBER_OFFSET_INIT(device_node_allnext, "device_node", "allnext"); MEMBER_OFFSET_INIT(device_node_properties, "device_node", "properties"); MEMBER_OFFSET_INIT(property_name, "property", "name"); MEMBER_OFFSET_INIT(property_value, "property", "value"); MEMBER_OFFSET_INIT(property_next, "property", "next"); MEMBER_OFFSET_INIT(machdep_calls_setup_residual, "machdep_calls", "setup_residual"); MEMBER_OFFSET_INIT(RESIDUAL_VitalProductData, "RESIDUAL", "VitalProductData"); MEMBER_OFFSET_INIT(VPD_ProcessorHz, "VPD", "ProcessorHz"); MEMBER_OFFSET_INIT(bd_info_bi_intfreq, "bd_info", "bi_intfreq"); if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(int), &machdep->nr_irqs); else machdep->nr_irqs = 512; /* NR_IRQS (at least) */ if (!machdep->hz) { machdep->hz = HZ; if (THIS_KERNEL_VERSION 
>= LINUX(2,6,0)) machdep->hz = 1000; } if (symbol_exists("cur_cpu_spec")) { get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec); readmem(cur_cpu_spec + MEMBER_OFFSET("cpu_spec", "cpu_user_features"), KVADDR, &cpu_features, sizeof(uint), "cpu user features", FAULT_ON_ERROR); if (cpu_features & CPU_BOOKE) machdep->flags |= CPU_BOOKE; } else machdep->flags |= CPU_BOOKE; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; /* * IRQ stacks are introduced in 2.6 and also configurable. */ if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("hardirq_ctx")) STRUCT_SIZE_INIT(irq_ctx, "hardirq_ctx"); STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); break; case POST_INIT: break; case LOG_ONLY: machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL; break; } } void ppc_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " platform: %s\n", PPC_PLATFORM); fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? "|" : ""); if (machdep->flags & CPU_BOOKE) fprintf(fp, "%sCPU_BOOKE", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT); fprintf(fp, " ptrs_per_pgd: %d\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %d\n", PTRS_PER_PTE); fprintf(fp, " pte_size: %d\n", PTE_SIZE); fprintf(fp, " pte_rpn_shift: %d\n", PTE_RPN_SHIFT); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ppc_eframe_search() [TBD]\n"); fprintf(fp, " back_trace: ppc_back_trace_cmd()\n"); fprintf(fp, " processor_speed: ppc_processor_speed()\n"); fprintf(fp, " uvtop: ppc_uvtop()\n"); fprintf(fp, " kvtop: ppc_kvtop()\n"); fprintf(fp, " get_task_pgd: ppc_get_task_pgd()\n"); if (machdep->dump_irq == generic_dump_irq) fprintf(fp, " dump_irq: generic_dump_irq()\n"); else fprintf(fp, " dump_irq: ppc_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " get_stack_frame: ppc_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: ppc_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ppc_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ppc_is_task_addr()\n"); fprintf(fp, " verify_symbol: ppc_verify_symbol()\n"); fprintf(fp, " dis_filter: ppc_dis_filter()\n"); fprintf(fp, " cmd_mach: 
ppc_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ppc_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: ppc_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static ulonglong ppc_pte_physaddr(ulonglong pte) { pte = pte >> PTE_RPN_SHIFT; /* pfn */ pte = pte << PAGE_SHIFT; /* physaddr */ return pte; } static int ppc_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *page_dir; ulong pgd_pte, page_table, pte_index; ulonglong pte; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (vaddr >> PGDIR_SHIFT); /* * Size of a pgd could be more than a PAGE. * So use PAGEBASE(page_dir), instead of * PAGEBASE(pgd) for FILL_PGD() */ FILL_PGD(PAGEBASE((ulong)page_dir), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET((ulong)page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!pgd_pte) { if (VTOP_SPECIAL) /* * This ppc platform have special address mapping * between vaddr and paddr which can not search from * standard page table. 
*/ return VTOP_SPECIAL(vaddr, paddr, verbose); goto no_page; } page_table = pgd_pte; if (IS_BOOKE()) page_table = VTOP(page_table); FILL_PTBL(PAGEBASE((ulong)page_table), PHYSADDR, PAGESIZE()); pte_index = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); if (IS_PAE()) pte = ULONGLONG(machdep->ptbl + PTE_SIZE * pte_index); else pte = ULONG(machdep->ptbl + PTE_SIZE * pte_index); if (verbose) fprintf(fp, " PTE: %lx => %llx\n", pgd_pte, pte); if (!(pte & _PAGE_PRESENT)) { if (pte && verbose) { fprintf(fp, "\n"); ppc_translate_pte((ulong)pte, 0, pte); } goto no_page; } if (verbose) { fprintf(fp, " PAGE: %llx\n\n", PAGEBASE(ppc_pte_physaddr(pte))); ppc_translate_pte((ulong)pte, 0, pte); } *paddr = PAGEBASE(ppc_pte_physaddr(pte)) + PAGEOFFSET(vaddr); return TRUE; no_page: return FALSE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. 
 */
static int
ppc_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	/*
	 * Kernel threads have no mm of their own: take the pgd either from
	 * thread_struct.pg_tables (older kernels) or from the active_mm the
	 * thread borrowed.
	 */
	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		if (VALID_MEMBER(thread_struct_pg_tables))
			pgd = (ulong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no pg_tables or active_mm?\n");

			readmem(tc->task + OFFSET(task_struct_active_mm),
				KVADDR, &active_mm, sizeof(void *),
				"task active_mm contents", FAULT_ON_ERROR);

			if (!active_mm)
				error(FATAL,
				    "no active_mm for this kernel thread\n");

			/*
			 * NOTE: reads the pgd *value* from the dump into the
			 * local pointer variable itself.
			 */
			readmem(active_mm + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long),
				"mm_struct pgd", FAULT_ON_ERROR);
		}
	} else {
		/*
		 * NOTE(review): task_mm(task, TRUE) apparently caches the
		 * task's mm_struct in tt->mm_struct on success, letting the
		 * pgd be pulled from the cached buffer — confirm against
		 * task_mm(); otherwise read it directly from the dump.
		 */
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	return ppc_pgd_vtop(pgd, vaddr, paddr, verbose);
}

/*
 * Translates a kernel virtual address to its physical address. cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
 */
static int
ppc_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	/* Before vmalloc_start is known, only unity-mapped translation. */
	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	/*
	 * Unity-mapped addresses translate arithmetically; fall through to
	 * the page-table walk only when verbose output was requested.
	 */
	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		if (!verbose)
			return TRUE;
	}

	pgd = (ulong *)vt->kernel_pgd[0];

	return ppc_pgd_vtop(pgd, kvaddr, paddr, verbose);
}

/*
 * Determine where vmalloc'd memory starts by looking at the first
 * entry on the vmlist.
 */
static ulong
ppc_vmalloc_start(void)
{
	return (first_vmalloc_address());
}

/*
 * PPC tasks are all stacksize-aligned, except when split from the stack.
 * PPC also allows the idle_task to be non-page aligned, so we have to make
 * an additional check through the idle_threads array.
 */
*/ static int ppc_is_task_addr(ulong task) { int i; if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)) return TRUE; for (i = 0; i < kt->cpus; i++) if (task == tt->idle_threads[i]) return TRUE; return FALSE; } /* * According to kernel source, this should cover all the PPC variants out * There, but since we can't test them all, YMMV. */ static ulong ppc_processor_speed(void) { ulong res, value, ppc_md, md_setup_res; ulong prep_setup_res; ulong node, type, name, properties; char str_buf[32]; ulong len, mhz = 0; if (machdep->mhz) return(machdep->mhz); if(symbol_exists("allnodes")) { get_symbol_data("allnodes", sizeof(void *), &node); while(node) { readmem(node+OFFSET(device_node_type), KVADDR, &type, sizeof(ulong), "node type", FAULT_ON_ERROR); if(type != 0) { len = read_string(type, str_buf, sizeof(str_buf)); if(len && (strcasecmp(str_buf, "cpu") == 0)) break; } readmem(node+OFFSET(device_node_allnext), KVADDR, &node, sizeof(ulong), "node allnext", FAULT_ON_ERROR); } /* now, if we found a CPU node, get the speed property */ if(node) { readmem(node+OFFSET(device_node_properties), KVADDR, &properties, sizeof(ulong), "node properties", FAULT_ON_ERROR); while(properties) { readmem(properties+OFFSET(property_name), KVADDR, &name, sizeof(ulong), "property name", FAULT_ON_ERROR); len = read_string(name, str_buf, sizeof(str_buf)); if (len && (strcasecmp(str_buf, "clock-frequency") == 0)) { /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, sizeof(ulong), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } else if(len && (strcasecmp(str_buf, "ibm,extended-clock-frequency") == 0)){ /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, 
sizeof(ulong), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } /* keep looking */ readmem(properties+ OFFSET(property_next), KVADDR, &properties, sizeof(ulong), "property next", FAULT_ON_ERROR); } if(!properties) { /* didn't find the cpu speed for some reason */ return (machdep->mhz = 0); } } } /* for machines w/o OF */ /* untested, but in theory this should work on prep machines */ if (symbol_exists("res") && !mhz) { get_symbol_data("res", sizeof(void *), &res); if (symbol_exists("prep_setup_residual")) { get_symbol_data("prep_setup_residual", sizeof(void *), &prep_setup_res); get_symbol_data("ppc_md", sizeof(void *), &ppc_md); readmem(ppc_md + OFFSET(machdep_calls_setup_residual), KVADDR, &md_setup_res, sizeof(ulong), "ppc_md setup_residual", FAULT_ON_ERROR); if(prep_setup_res == md_setup_res) { /* PREP machine */ readmem(res+ OFFSET(RESIDUAL_VitalProductData)+ OFFSET(VPD_ProcessorHz), KVADDR, &mhz, sizeof(ulong), "res VitalProductData", FAULT_ON_ERROR); mhz = (mhz > 1024) ? mhz >> 20 : mhz; } } if(!mhz) { /* everything else seems to do this the same way... */ readmem(res + OFFSET(bd_info_bi_intfreq), KVADDR, &mhz, sizeof(ulong), "bd_info bi_intfreq", FAULT_ON_ERROR); mhz /= 1000000; } } /* else...well, we don't have OF, or a residual structure, so * just print unknown MHz */ return (machdep->mhz = mhz); } /* * Accept or reject a symbol from the kernel namelist. */ static int ppc_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_start")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STREQ(name, "Letext") && !STRNEQ(name, "__func__.")); } /* * Get the relevant page directory pointer from a task structure. */ static ulong ppc_get_task_pgd(ulong task) { long offset; ulong pg_tables; offset = VALID_MEMBER(task_struct_thread) ? 
OFFSET(task_struct_thread) : OFFSET(task_struct_tss); if (INVALID_MEMBER(thread_struct_pg_tables)) error(FATAL, "pg_tables does not exist in this kernel's thread_struct\n"); offset += OFFSET(thread_struct_pg_tables); readmem(task + offset, KVADDR, &pg_tables, sizeof(ulong), "task thread pg_tables", FAULT_ON_ERROR); return(pg_tables); } /* * Translate a PTE, returning TRUE if the page is _PAGE_PRESENT. * If a physaddr pointer is passed in, don't print anything. */ static int ppc_translate_pte(ulong pte32, void *physaddr, ulonglong pte64) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; ulonglong paddr; if (!IS_PAE()) pte64 = pte32; paddr = PAGEBASE(ppc_pte_physaddr(pte64)); page_present = (pte64 & _PAGE_PRESENT); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%llx", pte64); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte64) { swap_location(pte64, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte64) { if (_PAGE_PRESENT && 
(pte64 & _PAGE_PRESENT) == _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (_PAGE_USER && (pte64 & _PAGE_USER) == _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (_PAGE_RW && (pte64 & _PAGE_RW) == _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (_PAGE_K_RW && ((pte64 & _PAGE_K_RW) == _PAGE_K_RW)) fprintf(fp, "%sK-RW", others++ ? "|" : ""); if (_PAGE_GUARDED && (pte64 & _PAGE_GUARDED) == _PAGE_GUARDED) fprintf(fp, "%sGUARDED", others++ ? "|" : ""); if (_PAGE_COHERENT && (pte64 & _PAGE_COHERENT) == _PAGE_COHERENT) fprintf(fp, "%sCOHERENT", others++ ? "|" : ""); if (_PAGE_NO_CACHE && (pte64 & _PAGE_NO_CACHE) == _PAGE_NO_CACHE) fprintf(fp, "%sNO_CACHE", others++ ? "|" : ""); if (_PAGE_WRITETHRU && (pte64 & _PAGE_WRITETHRU) == _PAGE_WRITETHRU) fprintf(fp, "%sWRITETHRU", others++ ? "|" : ""); if (_PAGE_DIRTY && (pte64 & _PAGE_DIRTY) == _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (_PAGE_ACCESSED && (pte64 & _PAGE_ACCESSED) == _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (_PAGE_HWWRITE && (pte64 & _PAGE_HWWRITE) == _PAGE_HWWRITE) fprintf(fp, "%sHWWRITE", others++ ? "|" : ""); } else fprintf(fp, "no mapping"); fprintf(fp, ")\n"); return page_present; } /* * Look for likely exception frames in a stack. */ static int ppc_eframe_search(struct bt_info *bt) { return (error(FATAL, "ppc_eframe_search: function not written yet!\n")); } static ulong ppc_in_irqstack(ulong addr) { int c; if (!(tt->flags & IRQSTACKS)) return 0; for (c = 0; c < kt->cpus; c++) { if (tt->hardirq_ctx[c]) { if ((addr >= tt->hardirq_ctx[c]) && (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx)))) return tt->hardirq_ctx[c]; } if (tt->softirq_ctx[c]) { if ((addr >= tt->softirq_ctx[c]) && (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx)))) return tt->softirq_ctx[c]; } } return 0; } /* * Unroll a kernel stack. 
*/ static void ppc_back_trace_cmd(struct bt_info *bt) { char buf[BUFSIZE]; struct gnu_request *req; bt->flags |= BT_EXCEPTION_FRAME; if (CRASHDEBUG(1) || bt->debug) fprintf(fp, " => PC: %lx (%s) FP: %lx \n", bt->instptr, value_to_symstr(bt->instptr, buf, 0), bt->stkptr); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_STACK_TRACE; req->flags = GNU_RETURN_ON_ERROR; req->buf = GETBUF(BUFSIZE); req->debug = bt->debug; req->task = bt->task; req->pc = bt->instptr; req->sp = bt->stkptr; if (bt->flags & BT_USE_GDB) { strcpy(req->buf, "backtrace"); gdb_interface(req); } else ppc_back_trace(req, bt); FREEBUF(req->buf); FREEBUF(req); } /* * Unroll the kernel stack using a minimal amount of gdb services. */ static void ppc_back_trace(struct gnu_request *req, struct bt_info *bt) { int frame = 0; ulong lr = 0; ulong newpc = 0, newsp, marker; int eframe_found; if (!INSTACK(req->sp, bt)) { ulong irqstack; if ((irqstack = ppc_in_irqstack(req->sp))) { bt->stackbase = irqstack; bt->stacktop = bt->stackbase + SIZE(irq_ctx); alter_stackbuf(bt); } else { if (CRASHDEBUG(1)) fprintf(fp, "cannot find the stack info.\n"); return; } } while (INSTACK(req->sp, bt)) { newsp = *(ulong *)&bt->stackbuf[req->sp - bt->stackbase]; if (IS_KVADDR(newsp) && INSTACK(newsp, bt)) newpc = *(ulong *)&bt->stackbuf[newsp + STACK_FRAME_LR_SAVE - bt->stackbase]; if ((req->name = closest_symbol(req->pc)) == NULL) { error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", req->pc); break; } bt->flags |= BT_SAVE_LASTSP; ppc_print_stack_entry(frame, req, newsp, lr, bt); bt->flags &= ~(ulonglong)BT_SAVE_LASTSP; lr = 0; if (BT_REFERENCE_FOUND(bt)) return; eframe_found = FALSE; /* * Is this frame an execption one? * In 2.6, 0x72656773 is saved and used * to determine the execption frame. */ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) { if (frame && (newsp - req->sp - STACK_FRAME_OVERHEAD >= sizeof(struct ppc_pt_regs))) /* there might be an exception frame here... 
*/ eframe_found = TRUE; /* also possible ones here... */ else if(!IS_KVADDR(newsp) || (newsp < req->sp)) eframe_found = TRUE; else if (STREQ(req->name, ".ret_from_except")) eframe_found = TRUE; } else if ((newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc_pt_regs)){ readmem(req->sp + STACK_FRAME_MARKER, KVADDR, &marker, sizeof(ulong), "frame marker", FAULT_ON_ERROR); if (marker == STACK_FRAME_REGS_MARKER) eframe_found = TRUE; } if (eframe_found) { char *efrm_str; struct ppc_pt_regs regs; readmem(req->sp + STACK_FRAME_OVERHEAD, KVADDR, ®s, sizeof(struct ppc_pt_regs), "exception frame", FAULT_ON_ERROR); efrm_str = ppc_check_eframe(®s); if (efrm_str) { ppc_print_eframe(efrm_str, ®s, bt); lr = regs.link; newpc = regs.nip; newsp = regs.gpr[1]; } } if (STREQ(req->name, "start_kernel")) break; req->pc = newpc; req->sp = newsp; frame++; } return; } static void ppc_display_full_frame(struct bt_info *bt, ulong nextsp, FILE *ofp) { int i, u_idx; ulong *nip; ulong words, addr; char buf[BUFSIZE]; if (!INSTACK(nextsp, bt)) nextsp = bt->stacktop; words = (nextsp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? 
"\n" : "", addr); nip = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *nip, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * print one entry of a stack trace */ static void ppc_print_stack_entry(int frame, struct gnu_request *req, ulong newsp, ulong lr, struct bt_info *bt) { struct load_module *lm; char *lrname = NULL; ulong offset; struct syment *sp; char *name_plus_offset; char buf[BUFSIZE]; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(req->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (bt->ref->hexval == req->pc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(req->pc, &offset); if (sp && offset) name_plus_offset = value_to_symstr(req->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%lx] %s at %lx", frame < 10 ? " " : "", frame, req->sp, name_plus_offset ? name_plus_offset : req->name, req->pc); if (module_symbol(req->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); if (req->ra) { /* * Previous frame is an exception one. If the func * symbol for the current frame is same as with * the previous frame's LR value, print "(unreliable)". */ lrname = closest_symbol(req->ra); req->ra = 0; if (!lrname) { if (CRASHDEBUG(1)) error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", req->ra); return; } } if (lr) { /* * Link register value for an expection frame. 
*/ if ((lrname = closest_symbol(lr)) == NULL) { if (CRASHDEBUG(1)) error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", lr); return; } if (req->pc != lr) { fprintf(fp, "\n [Link Register ] "); fprintf(fp, " [%lx] %s at %lx", req->sp, lrname, lr); } req->ra = lr; } if (!req->name || STREQ(req->name,lrname)) fprintf(fp, " (unreliable)"); fprintf(fp, "\n"); } if (bt->flags & BT_SAVE_LASTSP) req->lastsp = req->sp; bt->frameptr = req->sp; if (bt->flags & BT_FULL) if (IS_KVADDR(newsp)) ppc_display_full_frame(bt, newsp, fp); if (bt->flags & BT_LINE_NUMBERS) ppc_dump_line_number(req->pc); } /* * Check whether the frame is exception one! */ static char * ppc_check_eframe(struct ppc_pt_regs *regs) { switch(regs->trap & ~0xF) { case 0x200: return "machine check"; case 0x300: return "address error (store)"; case 0x400: return "instruction bus error"; case 0x500: return "interrupt"; case 0x600: return "alingment"; case 0x700: return "breakpoint trap"; case 0x800: return "fpu unavailable"; case 0x900: return "decrementer"; case 0xa00: return "reserved"; case 0xb00: return "reserved"; case 0xc00: return "syscall"; case 0xd00: return "single-step/watch"; case 0xe00: return "fp assist"; } /* No exception frame exists */ return NULL; } static void ppc_print_regs(struct ppc_pt_regs *regs) { int i; /* print out the gprs... */ for(i=0; i<32; i++) { if(!(i % 4)) fprintf(fp, "\n"); fprintf(fp, "R%d:%s %08lx ", i, ((i < 10) ? " " : ""), regs->gpr[i]); /* * In 2.6, some stack frame contains only partial regs set. * For the partial set, only 14 regs will be saved and trap * field will contain 1 in the least significant bit. 
*/ if ((i == 13) && (regs->trap & 1)) break; } fprintf(fp, "\n"); /* print out the rest of the registers */ fprintf(fp, "NIP: %08lx ", regs->nip); fprintf(fp, "MSR: %08lx ", regs->msr); fprintf(fp, "OR3: %08lx ", regs->orig_gpr3); fprintf(fp, "CTR: %08lx\n", regs->ctr); fprintf(fp, "LR: %08lx ", regs->link); fprintf(fp, "XER: %08lx ", regs->xer); fprintf(fp, "CCR: %08lx ", regs->ccr); fprintf(fp, "MQ: %08lx\n", regs->mq); fprintf(fp, "DAR: %08lx ", regs->dar); fprintf(fp, "DSISR: %08lx ", regs->dsisr); fprintf(fp, " Syscall Result: %08lx\n", regs->result); } /* * Print the exception frame information */ static void ppc_print_eframe(char *efrm_str, struct ppc_pt_regs *regs, struct bt_info *bt) { if (BT_REFERENCE_CHECK(bt)) return; fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); ppc_print_regs(regs); fprintf(fp, "\n"); } static void ppc_kdump_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { struct ppc_pt_regs *pt_regs; unsigned long ip, sp; ip = sp = 0; pt_regs = (struct ppc_pt_regs*)bt->machdep; if (!pt_regs || !(pt_regs->gpr[1])) { fprintf(fp, "0%lx: GPR1 register value(SP) was not saved\n", bt->task); return; } sp = pt_regs->gpr[1]; if (!IS_KVADDR(sp)) { if (IN_TASK_VMA(bt->task, *ksp)) fprintf(fp, "%0lx: Task is running in user space\n", bt->task); else fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", bt->task, *ksp); } ip = pt_regs->nip; if(nip) *nip = ip; if (ksp) *ksp = sp; if (bt->flags && ((BT_TEXT_SYMBOLS | BT_TEXT_SYMBOLS_PRINT | BT_TEXT_SYMBOLS_NOPRINT))) return; /* * Print the collected regs for the active task */ ppc_print_regs(pt_regs); if (!IS_KVADDR(sp)) return; fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip, closest_symbol(pt_regs->nip)); fprintf(fp, " LR [%016lx] %s\n", pt_regs->link, closest_symbol(pt_regs->link)); fprintf(fp, "\n"); return; } static void ppc_dumpfile_stack_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) { struct syment *sp; /* * For KDUMP and compressed KDUMP get the SP, PC from pt_regs * read 
from the Elf Note.
	 */
	if (ELF_NOTES_VALID()) {
		ppc_kdump_stack_frame(bt, getpc, getsp);
		return;
	}

	/*
	 * Non-ELF dumpfile: approximate the PC from the symbol that
	 * follows crash_save_current_state (or a fixed +16 offset when
	 * no following symbol is found).
	 */
	if (getpc) {
		if (!(sp = next_symbol("crash_save_current_state", NULL)))
			*getpc = (symbol_value("crash_save_current_state")+16);
		else
			*getpc = (sp->value - 4);
	}
}

/*
 * Get a stack frame combination of pc and ra from the most relevent spot.
 */
static void
ppc_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	/* active task in a dumpfile: registers come from the dump itself */
	if (DUMPFILE() && is_task_active(bt->task))
		ppc_dumpfile_stack_frame(bt, pcp, spp);
	else
		get_ppc_frame(bt, pcp, spp);
}

/*
 * Do the work for ppc_get_stack_frame() for non-active tasks
 *
 * Reads the saved kernel stack pointer out of the task_struct (via
 * thread_info.ksp, tss.ksp or thread.ksp depending on kernel version),
 * then pulls the NIP from the pt_regs frame above it.
 */
static void
get_ppc_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	ulong ip;
	ulong sp;
	ulong *stack;
	ulong task;
	struct ppc_pt_regs regs;

	ip = 0;
	task = bt->task;
	stack = (ulong *)bt->stackbuf;

	/* pick the ksp location appropriate to this kernel's task layout */
	if ((tt->flags & THREAD_INFO) &&
	    VALID_MEMBER(task_struct_thread_ksp))
		readmem(task + OFFSET(task_struct_thread_ksp), KVADDR,
			&sp, sizeof(void *),
			"thread_struct ksp", FAULT_ON_ERROR);
	else if (VALID_MEMBER(task_struct_tss_ksp))
		sp = stack[OFFSET(task_struct_tss_ksp)/sizeof(long)];
	else
		sp = stack[OFFSET(task_struct_thread_ksp)/sizeof(long)];

	if (!INSTACK(sp, bt))
		goto out;

	readmem(sp + STACK_FRAME_OVERHEAD, KVADDR, &regs,
		sizeof(struct ppc_pt_regs),
		"PPC pt_regs", FAULT_ON_ERROR);
	ip = regs.nip;
	if (STREQ(closest_symbol(ip), "__switch_to")) {
		/* NOTE: _switch_to() calls _switch() which
		 * is asm.  _switch leaves pc == lr.
		 * Working through this frame is tricky,
		 * and this mess isn't going to help if we
		 * actually dumped here.  Most likely the
		 * analyzer is trying to backtrace a task.
		 * Need to skip 2 frames.
*/
		/* skip two frames: follow two saved back-chain pointers,
		 * then take the LR slot (sp + 4) of the resulting frame */
		sp = stack[(sp - bt->stackbase)/sizeof(ulong)];
		if (!INSTACK(sp, bt))
			goto out;
		sp = stack[(sp - bt->stackbase)/sizeof(ulong)];
		if (!INSTACK(sp + 4, bt))
			goto out;
		ip = stack[(sp + 4 - bt->stackbase)/sizeof(ulong)];
	}

out:
	/* NOTE(review): the panic special-case below stores sp but skips
	 * the getpc store -- presumably intentional so the caller keeps
	 * ip == 0 for a panicked dumpfile task; confirm before changing. */
	if (DUMPFILE() && getsp && STREQ(closest_symbol(sp), "panic")) {
		*getsp = sp;
		return;
	}

	if (getsp)
		*getsp = sp;
	if (getpc)
		*getpc = ip;
}

/*
 * Do the work for cmd_irq().
 *
 * Reads the irq_desc[irq] entry (depth/level, action chain, and the
 * hw_interrupt_type controller) out of the kernel and pretty-prints
 * each field, resolving function pointers to symbol names.
 */
static void
ppc_dump_irq(int irq)
{
	ulong irq_desc_addr, addr;
	int level, others;
	ulong action, ctl, value;
	char typename[32];
	int len;

	irq_desc_addr = symbol_value("irq_desc") + (SIZE(irqdesc) * irq);

	readmem(irq_desc_addr + OFFSET(irqdesc_level), KVADDR, &level,
		sizeof(int), "irq_desc entry", FAULT_ON_ERROR);
	readmem(irq_desc_addr + OFFSET(irqdesc_action), KVADDR, &action,
		sizeof(long), "irq_desc entry", FAULT_ON_ERROR);
	readmem(irq_desc_addr + OFFSET(irqdesc_ctl), KVADDR, &ctl,
		sizeof(long), "irq_desc entry", FAULT_ON_ERROR);

	fprintf(fp, " IRQ: %d\n", irq);
	fprintf(fp, " STATUS: 0\n");
	fprintf(fp, "HANDLER: ");

	if (value_symbol(ctl)) {
		fprintf(fp, "%lx ", ctl);
		pad_line(fp, VADDR_PRLEN == 8 ?
VADDR_PRLEN+2 : VADDR_PRLEN-6, ' '); fprintf(fp, "<%s>\n", value_symbol(ctl)); } else fprintf(fp, "%lx\n", ctl); if(ctl) { /* typename */ readmem(ctl + OFFSET(hw_interrupt_type_typename), KVADDR, &addr, sizeof(ulong), "typename pointer", FAULT_ON_ERROR); len = read_string(addr, typename, 32); if(len) fprintf(fp, " typename: %08lx \"%s\"\n", addr, typename); /* startup...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_startup), KVADDR, &addr, sizeof(ulong), "interrupt startup", FAULT_ON_ERROR); fprintf(fp, " startup: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); /* shutdown...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_shutdown), KVADDR, &addr, sizeof(ulong), "interrupt shutdown", FAULT_ON_ERROR); fprintf(fp, " shutdown: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); if (VALID_MEMBER(hw_interrupt_type_handle)) { /* handle */ readmem(ctl + OFFSET(hw_interrupt_type_handle), KVADDR, &addr, sizeof(ulong), "interrupt handle", FAULT_ON_ERROR); fprintf(fp, " handle: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); } /* enable/disable */ readmem(ctl + OFFSET(hw_interrupt_type_enable), KVADDR, &addr, sizeof(ulong), "interrupt enable", FAULT_ON_ERROR); fprintf(fp, " enable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); readmem(ctl + OFFSET(hw_interrupt_type_disable), KVADDR, &addr, sizeof(ulong), "interrupt disable", FAULT_ON_ERROR); fprintf(fp, " disable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); } /* next, the action... 
and its submembers */ if(!action) fprintf(fp, " ACTION: (none)\n"); while(action) { fprintf(fp, " ACTION: %08lx\n", action); /* handler */ readmem(action + OFFSET(irqaction_handler), KVADDR, &addr, sizeof(ulong), "action handler", FAULT_ON_ERROR); fprintf(fp, " handler: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); /* flags */ readmem(action + OFFSET(irqaction_flags), KVADDR, &value, sizeof(ulong), "action flags", FAULT_ON_ERROR); fprintf(fp, " flags: %lx ", value); if (value) { others = 0; fprintf(fp, "("); if (value & SA_INTERRUPT) fprintf(fp, "%sSA_INTERRUPT", others++ ? "|" : ""); if (value & SA_PROBE) fprintf(fp, "%sSA_PROBE", others++ ? "|" : ""); if (value & SA_SAMPLE_RANDOM) fprintf(fp, "%sSA_SAMPLE_RANDOM", others++ ? "|" : ""); if (value & SA_SHIRQ) fprintf(fp, "%sSA_SHIRQ", others++ ? "|" : ""); fprintf(fp, ")"); if (value & ~ACTION_FLAGS) { fprintf(fp, " (bits %lx not translated)", value & ~ACTION_FLAGS); } } fprintf(fp, "\n"); /* mask */ readmem(action + OFFSET(irqaction_mask), KVADDR, &value, sizeof(ulong), "action mask", FAULT_ON_ERROR); fprintf(fp, " mask: %lx\n", value); /* name */ readmem(action + OFFSET(irqaction_name), KVADDR, &addr, sizeof(ulong), "action name", FAULT_ON_ERROR); len = read_string(addr, typename, 32); if(len) fprintf(fp, " name: %08lx \"%s\"\n", addr, typename); /* dev_id */ readmem(action + OFFSET(irqaction_dev_id), KVADDR, &value, sizeof(ulong), "action dev_id", FAULT_ON_ERROR); fprintf(fp, " dev_id: %08lx\n", value); /* next */ readmem(action + OFFSET(irqaction_next), KVADDR, &value, sizeof(ulong), "action next", FAULT_ON_ERROR); fprintf(fp, " next: %lx\n", value); /* keep going if there are chained interrupts */ action = value; } fprintf(fp, " DEPTH: %x\n\n", level); } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int ppc_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char 
buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on ppc) so this routine both fixes the * references as well as imposing the current output radix on the translations. */ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int ppc_get_smp_cpus(void) { return (get_cpus_online() > 0) ? get_cpus_online() : kt->cpus; } /* * Machine dependent command. */ void ppc_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); ppc_display_machine_stats(); } /* * "mach" command output. 
*/ static void ppc_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " PLATFORM: %s\n", PPC_PLATFORM); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (tt->flags & IRQSTACKS) { fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", SIZE(irq_ctx)); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", SIZE(irq_ctx)); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } } static const char *hook_files[] = { "arch/ppc/kernel/entry.S", "arch/ppc/kernel/head.S", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook ppc_line_number_hooks[] = { {"DoSyscall", ENTRY_S}, {"_switch", ENTRY_S}, {"ret_from_syscall_1", ENTRY_S}, {"ret_from_syscall_2", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"ret_from_intercept", ENTRY_S}, {"ret_from_except", ENTRY_S}, {"do_signal_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"enter_rtas", ENTRY_S}, {"restore", ENTRY_S}, {"fake_interrupt", ENTRY_S}, {"lost_irq_ret", ENTRY_S}, {"do_bottom_half_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"signal_return", ENTRY_S}, {"_stext", 
HEAD_S}, {"_start", HEAD_S}, {"__start", HEAD_S}, {"__after_mmu_off", HEAD_S}, {"turn_on_mmu", HEAD_S}, {"__secondary_hold", HEAD_S}, {"DataAccessCont", HEAD_S}, {"DataAccess", HEAD_S}, {"i0x300", HEAD_S}, {"DataSegmentCont", HEAD_S}, {"InstructionAccessCont", HEAD_S}, {"InstructionAccess", HEAD_S}, {"i0x400", HEAD_S}, {"InstructionSegmentCont", HEAD_S}, {"HardwareInterrupt", HEAD_S}, {"do_IRQ_intercept", HEAD_S}, {"i0x600", HEAD_S}, {"ProgramCheck", HEAD_S}, {"i0x700", HEAD_S}, {"FPUnavailable", HEAD_S}, {"i0x800", HEAD_S}, {"Decrementer", HEAD_S}, {"timer_interrupt_intercept", HEAD_S}, {"SystemCall", HEAD_S}, {"trap_0f_cont", HEAD_S}, {"Trap_0f", HEAD_S}, {"InstructionTLBMiss", HEAD_S}, {"InstructionAddressInvalid", HEAD_S}, {"DataLoadTLBMiss", HEAD_S}, {"DataAddressInvalid", HEAD_S}, {"DataStoreTLBMiss", HEAD_S}, {"AltiVecUnavailable", HEAD_S}, {"DataAccess", HEAD_S}, {"InstructionAccess", HEAD_S}, {"DataSegment", HEAD_S}, {"InstructionSegment", HEAD_S}, {"transfer_to_handler", HEAD_S}, {"stack_ovf", HEAD_S}, {"load_up_fpu", HEAD_S}, {"KernelFP", HEAD_S}, {"load_up_altivec", HEAD_S}, {"KernelAltiVec", HEAD_S}, {"giveup_altivec", HEAD_S}, {"giveup_fpu", HEAD_S}, {"relocate_kernel", HEAD_S}, {"copy_and_flush", HEAD_S}, {"fix_mem_constants", HEAD_S}, {"apus_interrupt_entry", HEAD_S}, {"__secondary_start_gemini", HEAD_S}, {"__secondary_start_psurge", HEAD_S}, {"__secondary_start_psurge2", HEAD_S}, {"__secondary_start_psurge3", HEAD_S}, {"__secondary_start_psurge99", HEAD_S}, {"__secondary_start", HEAD_S}, {"setup_common_caches", HEAD_S}, {"setup_604_hid0", HEAD_S}, {"setup_750_7400_hid0", HEAD_S}, {"load_up_mmu", HEAD_S}, {"start_here", HEAD_S}, {"clear_bats", HEAD_S}, {"flush_tlbs", HEAD_S}, {"mmu_off", HEAD_S}, {"initial_bats", HEAD_S}, {"setup_disp_bat", HEAD_S}, {"m8260_gorom", HEAD_S}, {"sdata", HEAD_S}, {"empty_zero_page", HEAD_S}, {"swapper_pg_dir", HEAD_S}, {"cmd_line", HEAD_S}, {"intercept_table", HEAD_S}, {"set_context", HEAD_S}, {NULL, NULL} /* list must 
be NULL-terminated */ }; static void ppc_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } /* * Try to relocate NT_PRSTATUS notes according by in kernel crash_notes. * Function is only called from ppc's get_regs. */ static int verify_crash_note_in_kernel(int cpu) { int ret; Elf32_Nhdr *note32; ulong crash_notes_ptr; char *buf, *name; ret = TRUE; if (!readmem(symbol_value("crash_notes"), KVADDR, &crash_notes_ptr, sizeof(ulong), "crash_notes", QUIET|RETURN_ON_ERROR) || !crash_notes_ptr) goto out; buf = GETBUF(SIZE(note_buf)); if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) crash_notes_ptr += kt->__per_cpu_offset[cpu]; if (!readmem(crash_notes_ptr, KVADDR, buf, SIZE(note_buf), "cpu crash_notes", QUIET|RETURN_ON_ERROR)) goto freebuf; note32 = (Elf32_Nhdr *)buf; name = (char *)(note32 + 1); if (note32->n_type != NT_PRSTATUS || note32->n_namesz != strlen("CORE") + 1 || strncmp(name, "CORE", note32->n_namesz) || note32->n_descsz != SIZE(elf_prstatus)) ret = FALSE; freebuf: FREEBUF(buf); out: return ret; } void ppc_relocate_nt_prstatus_percpu(void **nt_prstatus_percpu, uint *num_prstatus_notes) { static int relocated = FALSE; void **nt_ptr; int i, j, nrcpus; size_t size; /* relocation is possible only once */ if (relocated == TRUE) return; relocated = TRUE; if (!symbol_exists("crash_notes") || !VALID_STRUCT(note_buf) || !VALID_STRUCT(elf_prstatus)) return; size = NR_CPUS * sizeof(void *); nt_ptr = (void **)GETBUF(size); BCOPY(nt_prstatus_percpu, nt_ptr, size); BZERO(nt_prstatus_percpu, size); *num_prstatus_notes = 0; nrcpus = (kt->kernel_NR_CPUS ? 
kt->kernel_NR_CPUS : NR_CPUS); for (i = 0, j = 0; i < nrcpus; i++) { if (!in_cpu_map(ONLINE_MAP, i)) continue; if (verify_crash_note_in_kernel(i)) nt_prstatus_percpu[i] = nt_ptr[j++]; else if (CRASHDEBUG(1)) error(WARNING, "cpu#%d: crash_notes not saved\n", i); /* num_prstatus_notes is always equal to online cpus in ppc */ (*num_prstatus_notes)++; } FREEBUF(nt_ptr); } #endif /* PPC */ crash-7.1.4/global_data.c0000775000000000000000000001304012634305150013720 0ustar rootroot/* global_data.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010, 2012-2013 David Anderson * Copyright (C) 2002-2006, 2010, 2012-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* * Data output FILE pointer. The contents of fp are changed on the fly * depending upon whether the output is going to stdout, redirected to a * user-designated pipe or file, or to the "standard" scrolling pipe. * Regardless of where it ends up, fprintf(fp, ...) is used throughout * instead of printf(). */ FILE *fp; /* * The state of the program is kept in the program_context structure. * Given that it's consulted so often, "pc" is globally available to * quickly access the structure contents. 
*/
struct program_context program_context = { 0 };
struct program_context *pc = &program_context;

/*
 * The same thing goes for accesses to the frequently-accessed task_table,
 * kernel_table, vm_table, symbol_table_data and machdep_table, making the
 * "tt", "kt", "vt", "st" and "machdep" pointers globally available.
 */
struct task_table task_table = { 0 };
struct task_table *tt = &task_table;

struct kernel_table kernel_table = { 0 };
struct kernel_table *kt = &kernel_table;

struct vm_table vm_table = { 0 };
struct vm_table *vt = &vm_table;

struct symbol_table_data symbol_table_data = { 0 };
struct symbol_table_data *st = &symbol_table_data;

struct machdep_table machdep_table = { 0 };
struct machdep_table *machdep = &machdep_table;

/*
 * Command functions are entered with the args[] array and argcnt value
 * pre-set for issuance to getopt().
 */
char *args[MAXARGS];	/* argument array */
int argcnt;		/* argument count */
int argerrs;		/* argument error counter */

/*
 * To add a new command, declare it in defs.h and enter it in this table.
*/ struct command_table_entry linux_command_table[] = { {"*", cmd_pointer, help_pointer, 0}, {"alias", cmd_alias, help_alias, 0}, {"ascii", cmd_ascii, help_ascii, 0}, {"bt", cmd_bt, help_bt, REFRESH_TASK_TABLE}, {"btop", cmd_btop, help_btop, 0}, {"dev", cmd_dev, help_dev, 0}, {"dis", cmd_dis, help_dis, MINIMAL}, {"eval", cmd_eval, help_eval, MINIMAL}, {"exit", cmd_quit, help_exit, MINIMAL}, {"extend", cmd_extend, help_extend, MINIMAL}, {"files", cmd_files, help_files, REFRESH_TASK_TABLE}, {"foreach", cmd_foreach, help_foreach, REFRESH_TASK_TABLE}, {"fuser", cmd_fuser, help_fuser, REFRESH_TASK_TABLE}, {"gdb", cmd_gdb, help_gdb, REFRESH_TASK_TABLE}, {"help", cmd_help, help_help, MINIMAL}, {"ipcs", cmd_ipcs, help_ipcs, REFRESH_TASK_TABLE}, {"irq", cmd_irq, help_irq, 0}, {"kmem", cmd_kmem, help_kmem, 0}, {"list", cmd_list, help__list, REFRESH_TASK_TABLE}, {"log", cmd_log, help_log, MINIMAL}, {"mach", cmd_mach, help_mach, 0}, {"map", cmd_map, help_map, HIDDEN_COMMAND}, {"mod", cmd_mod, help_mod, 0}, {"mount", cmd_mount, help_mount, 0}, {"net", cmd_net, help_net, REFRESH_TASK_TABLE}, {"p", cmd_p, help_p, 0}, {"ps", cmd_ps, help_ps, REFRESH_TASK_TABLE}, {"pte", cmd_pte, help_pte, 0}, {"ptob", cmd_ptob, help_ptob, 0}, {"ptov", cmd_ptov, help_ptov, 0}, {"q", cmd_quit, help_quit, MINIMAL}, {"tree", cmd_tree, help_tree, REFRESH_TASK_TABLE}, {"rd", cmd_rd, help_rd, MINIMAL}, {"repeat", cmd_repeat, help_repeat, 0}, {"runq", cmd_runq, help_runq, REFRESH_TASK_TABLE}, {"search", cmd_search, help_search, 0}, {"set", cmd_set, help_set, REFRESH_TASK_TABLE | MINIMAL}, {"sig", cmd_sig, help_sig, REFRESH_TASK_TABLE}, {"struct", cmd_struct, help_struct, 0}, {"swap", cmd_swap, help_swap, 0}, {"sym", cmd_sym, help_sym, MINIMAL}, {"sys", cmd_sys, help_sys, REFRESH_TASK_TABLE}, {"task", cmd_task, help_task, REFRESH_TASK_TABLE}, {"test", cmd_test, NULL, HIDDEN_COMMAND}, {"timer", cmd_timer, help_timer, 0}, {"union", cmd_union, help_union, 0}, {"vm", cmd_vm, help_vm, REFRESH_TASK_TABLE}, 
{"vtop", cmd_vtop, help_vtop, REFRESH_TASK_TABLE}, {"waitq", cmd_waitq, help_waitq, REFRESH_TASK_TABLE}, {"whatis", cmd_whatis, help_whatis, 0}, {"wr", cmd_wr, help_wr, 0}, #if defined(S390) || defined(S390X) {"s390dbf", cmd_s390dbf, help_s390dbf, 0}, #endif {(char *)NULL} }; struct extension_table *extension_table = NULL; /* * The offset_table and size_table structure contents are referenced * through several OFFSET- and SIZE-related macros. The array_table * is a shortcut used by get_array_length(). */ struct offset_table offset_table = { 0 }; struct size_table size_table = { 0 }; struct array_table array_table = { 0 }; crash-7.1.4/arm.c0000664000000000000000000013542712634305150012261 0ustar rootroot/* * arm.c - core analysis suite * * Authors: * Thomas Fänge * Jan Karlsson * Mika Westerberg * * Copyright (C) 2010-2011 Nokia Corporation * Copyright (C) 2010 Sony Ericsson. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifdef ARM #include #include "defs.h" static void arm_parse_cmdline_args(void); static int arm_get_crash_notes(void); static int arm_verify_symbol(const char *, ulong, char); static int arm_is_module_addr(ulong); static int arm_is_kvaddr(ulong); static int arm_is_uvaddr(ulong, struct task_context *); static int arm_in_exception_text(ulong); static int arm_in_ret_from_syscall(ulong, int *); static void arm_back_trace(struct bt_info *); static void arm_back_trace_cmd(struct bt_info *); static ulong arm_processor_speed(void); static int arm_translate_pte(ulong, void *, ulonglong); static int arm_vtop(ulong, ulong *, physaddr_t *, int); static int arm_kvtop(struct task_context *, ulong, physaddr_t *, int); static int arm_uvtop(struct task_context *, ulong, physaddr_t *, int); static int arm_get_frame(struct bt_info *, ulong *, ulong *); static int arm_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *); static void arm_get_stack_frame(struct bt_info *, ulong *, ulong *); static void arm_dump_exception_stack(ulong, ulong); static void arm_display_full_frame(struct bt_info *, ulong); static ulong arm_vmalloc_start(void); static int arm_is_task_addr(ulong); static int arm_dis_filter(ulong, char *, unsigned int); static int arm_eframe_search(struct bt_info *); static ulong arm_get_task_pgd(ulong); static void arm_cmd_mach(void); static void arm_display_machine_stats(void); static int arm_get_smp_cpus(void); static void arm_init_machspec(void); static struct line_number_hook arm_line_number_hooks[]; static struct machine_specific arm_machine_specific; /** * struct arm_cpu_context_save - idle task registers * * This structure holds idle task registers. Only FP, SP, and PC are needed for * unwinding the stack. */ struct arm_cpu_context_save { ulong fp; ulong sp; ulong pc; }; /* * Holds registers during the crash. 
*/ static struct arm_pt_regs *panic_task_regs; #define PGDIR_SIZE() (4 * PAGESIZE()) #define PGDIR_OFFSET(X) (((ulong)(X)) & (PGDIR_SIZE() - 1)) #define _SECTION_PAGE_MASK (~((MEGABYTES(1))-1)) #define PMD_TYPE_MASK 3 #define PMD_TYPE_SECT 2 #define PMD_TYPE_TABLE 1 #define PMD_TYPE_SECT_LPAE 1 static inline ulong * pmd_page_addr(ulong pmd) { ulong ptr; if (machdep->flags & PGTABLE_V2) { ptr = PAGEBASE(pmd); } else { ptr = pmd & ~(PTRS_PER_PTE * sizeof(void *) - 1); ptr += PTRS_PER_PTE * sizeof(void *); } return (ulong *)ptr; } /* * "Linux" PTE definitions. */ #define L_PTE_PRESENT (1 << 0) #define L_PTE_YOUNG (1 << 1) #define L_PTE_FILE (1 << 2) #define L_PTE_DIRTY (1 << 6) #define L_PTE_WRITE (1 << 7) #define L_PTE_RDONLY L_PTE_WRITE #define L_PTE_USER (1 << 8) #define L_PTE_EXEC (1 << 9) #define L_PTE_XN L_PTE_EXEC #define L_PTE_SHARED (1 << 10) #define pte_val(pte) (pte) #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) #define pte_rdonly(pte) (pte_val(pte) & L_PTE_RDONLY) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) #define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) #define pte_xn(pte) (pte_val(pte) & L_PTE_XN) /* * Following stuff is taken directly from the kernel sources. These are used in * dump_exception_stack() to format an exception stack entry. 
*/ #define USR26_MODE 0x00000000 #define FIQ26_MODE 0x00000001 #define IRQ26_MODE 0x00000002 #define SVC26_MODE 0x00000003 #define USR_MODE 0x00000010 #define FIQ_MODE 0x00000011 #define IRQ_MODE 0x00000012 #define SVC_MODE 0x00000013 #define ABT_MODE 0x00000017 #define UND_MODE 0x0000001b #define SYSTEM_MODE 0x0000001f #define MODE32_BIT 0x00000010 #define MODE_MASK 0x0000001f #define PSR_T_BIT 0x00000020 #define PSR_F_BIT 0x00000040 #define PSR_I_BIT 0x00000080 #define PSR_A_BIT 0x00000100 #define PSR_E_BIT 0x00000200 #define PSR_J_BIT 0x01000000 #define PSR_Q_BIT 0x08000000 #define PSR_V_BIT 0x10000000 #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 #define PSR_N_BIT 0x80000000 #define isa_mode(regs) \ ((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \ (((regs)->ARM_cpsr & PSR_T_BIT) >> 5)) #define processor_mode(regs) \ ((regs)->ARM_cpsr & MODE_MASK) #define interrupts_enabled(regs) \ (!((regs)->ARM_cpsr & PSR_I_BIT)) #define fast_interrupts_enabled(regs) \ (!((regs)->ARM_cpsr & PSR_F_BIT)) static const char *processor_modes[] = { "USER_26", "FIQ_26", "IRQ_26", "SVC_26", "UK4_26", "UK5_26", "UK6_26", "UK7_26" , "UK8_26", "UK9_26", "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32", "IRQ_32", "SVC_32", "UK4_32", "UK5_32", "UK6_32", "ABT_32", "UK8_32", "UK9_32", "UK10_32", "UND_32", "UK12_32", "UK13_32", "UK14_32", "SYS_32", }; static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE", }; #define NOT_IMPLEMENTED() \ error(FATAL, "%s: N/A\n", __func__) /* * Do all necessary machine-specific setup here. This is called several times * during initialization. 
*/ void arm_init(int when) { ulong vaddr; char *string; struct syment *sp; #if defined(__i386__) || defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the ARM architecture\n"); #endif switch (when) { case PRE_SYMTAB: machdep->verify_symbol = arm_verify_symbol; machdep->machspec = &arm_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; if (machdep->cmdline_args[0]) arm_parse_cmdline_args(); break; case PRE_GDB: if ((machdep->pgd = (char *)malloc(PGDIR_SIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PMDSIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); /* * LPAE requires an additional page for the PGD, * so PG_DIR_SIZE = 0x5000 for LPAE */ if ((string = pc->read_vmcoreinfo("CONFIG_ARM_LPAE"))) { machdep->flags |= PAE; free(string); } else if ((sp = next_symbol("swapper_pg_dir", NULL)) && (sp->value - symbol_value("swapper_pg_dir")) == 0x5000) machdep->flags |= PAE; machdep->kvbase = symbol_value("_stext") & ~KVBASE_MASK; machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = arm_is_kvaddr; machdep->is_uvaddr = arm_is_uvaddr; machdep->eframe_search = arm_eframe_search; machdep->back_trace = arm_back_trace_cmd; machdep->processor_speed = arm_processor_speed; machdep->uvtop = arm_uvtop; machdep->kvtop = arm_kvtop; machdep->get_task_pgd = arm_get_task_pgd; machdep->get_stack_frame = arm_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = 
generic_get_stacktop; machdep->translate_pte = arm_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = arm_vmalloc_start; machdep->is_task_addr = arm_is_task_addr; machdep->dis_filter = arm_dis_filter; machdep->cmd_mach = arm_cmd_mach; machdep->get_smp_cpus = arm_get_smp_cpus; machdep->line_number_hooks = arm_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; arm_init_machspec(); break; case POST_GDB: /* * Starting from 2.6.38 hardware and Linux page tables * were reordered. See also mainline kernel commit * d30e45eeabe (ARM: pgtable: switch order of Linux vs * hardware page tables). */ if (THIS_KERNEL_VERSION > LINUX(2,6,37) || STRUCT_EXISTS("pteval_t")) machdep->flags |= PGTABLE_V2; if (THIS_KERNEL_VERSION >= LINUX(3,3,0) || symbol_exists("idmap_pgd")) machdep->flags |= IDMAP_PGD; if (machdep->flags & PAE) { machdep->section_size_bits = _SECTION_SIZE_BITS_LPAE; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_LPAE; } else { machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; } if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); /* * Registers for idle threads are saved in * thread_info.cpu_context. 
*/ STRUCT_SIZE_INIT(cpu_context_save, "cpu_context_save"); MEMBER_OFFSET_INIT(cpu_context_save_fp, "cpu_context_save", "fp"); MEMBER_OFFSET_INIT(cpu_context_save_sp, "cpu_context_save", "sp"); MEMBER_OFFSET_INIT(cpu_context_save_pc, "cpu_context_save", "pc"); MEMBER_OFFSET_INIT(thread_info_cpu_context, "thread_info", "cpu_context"); /* * We need to have information about note_buf_t which is used to * hold ELF note containing registers and status of the thread * that panic'd. */ STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid"); MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); if (!machdep->hz) machdep->hz = 100; break; case POST_VM: machdep->machspec->vmalloc_start_addr = vt->high_memory; /* * Modules are placed in first vmalloc'd area. This is 16MB * below PAGE_OFFSET. */ machdep->machspec->modules_end = machdep->kvbase - 1; vaddr = first_vmalloc_address(); if (vaddr > machdep->machspec->modules_end) machdep->machspec->modules_vaddr = DEFAULT_MODULES_VADDR; else machdep->machspec->modules_vaddr = vaddr; /* * crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. */ if (!ACTIVE() && !arm_get_crash_notes()) error(WARNING, "cannot retrieve registers for active task%s\n\n", kt->cpus > 1 ? 
"s" : ""); if (init_unwind_tables()) { if (CRASHDEBUG(1)) fprintf(fp, "using unwind tables\n"); } else { if (CRASHDEBUG(1)) fprintf(fp, "using framepointers\n"); } break; case LOG_ONLY: machdep->machspec = &arm_machine_specific; machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL & 0xffff0000UL; arm_init_machspec(); break; } } void arm_dump_machdep_table(ulong arg) { const struct machine_specific *ms; int others, i; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PHYS_BASE) fprintf(fp, "%sPHYS_BASE", others++ ? "|" : ""); if (machdep->flags & PGTABLE_V2) fprintf(fp, "%sPGTABLE_V2", others++ ? "|" : ""); if (machdep->flags & IDMAP_PGD) fprintf(fp, "%sIDMAP_PGD", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: arm_eframe_search()\n"); fprintf(fp, " back_trace: arm_back_trace_cmd()\n"); fprintf(fp, " processor_speed: arm_processor_speed()\n"); fprintf(fp, " uvtop: arm_uvtop()\n"); fprintf(fp, " kvtop: arm_kvtop()\n"); fprintf(fp, " get_task_pgd: arm_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: arm_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " 
get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: arm_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: arm_vmalloc_start()\n"); fprintf(fp, " is_task_addr: arm_is_task_addr()\n"); fprintf(fp, " verify_symbol: arm_verify_symbol()\n"); fprintf(fp, " dis_filter: arm_dis_filter()\n"); fprintf(fp, " cmd_mach: arm_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: arm_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: arm_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: arm_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " xendump_p2m_create: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: NULL\n"); fprintf(fp, " line_number_hooks: arm_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, "clear_machdep_cache: NULL\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)"); } ms = machdep->machspec; fprintf(fp, " machspec: %lx\n", (ulong)ms); fprintf(fp, " phys_base: %lx\n", ms->phys_base); fprintf(fp, " vmalloc_start_addr: %lx\n", ms->vmalloc_start_addr); fprintf(fp, " modules_vaddr: %lx\n", ms->modules_vaddr); fprintf(fp, " modules_end: %lx\n", ms->modules_end); fprintf(fp, " kernel_text_start: %lx\n", ms->kernel_text_start); fprintf(fp, " kernel_text_end: %lx\n", ms->kernel_text_end); fprintf(fp, "exception_text_start: %lx\n", ms->exception_text_start); fprintf(fp, " exception_text_end: %lx\n", ms->exception_text_end); fprintf(fp, " crash_task_regs: %lx\n", (ulong)ms->crash_task_regs); fprintf(fp, "unwind_index_prel31: %d\n", ms->unwind_index_prel31); } /* * Parse machine dependent command line arguments. * * Force the phys_base address via: * * --machdep phys_base=
*/ static void arm_parse_cmdline_args(void) { int index, i, c, err; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *p; ulong value = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %x\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { err = 0; if (STRNEQ(arglist[i], "phys_base=")) { int megabytes = FALSE; int flags = RETURN_ON_ERROR | QUIET; if ((LASTCHAR(arglist[i]) == 'm') || (LASTCHAR(arglist[i]) == 'M')) { LASTCHAR(arglist[i]) = NULLCHAR; megabytes = TRUE; } p = arglist[i] + strlen("phys_base="); if (strlen(p)) { if (megabytes) value = dtol(p, flags, &err); else value = htol(p, flags, &err); } if (!err) { if (megabytes) value = MEGABYTES(value); machdep->machspec->phys_base = value; error(NOTE, "setting phys_base to: 0x%lx\n", machdep->machspec->phys_base); machdep->flags |= PHYS_BASE; continue; } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } } } /* * Retrieve task registers for the time of the crash. */ static int arm_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf32_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. 
*/ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; icpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i=0;icpus;i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "failed to read note_buf_t\n"); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf32_Nhdr *)buf; p = buf + sizeof(Elf32_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf32_Nhdr); if (sizeof(Elf32_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid note (n_type != NT_PRSTATUS)\n"); goto fail; } if (p[0] != 'C' || p[1] != 'O' || p[2] != 'R' || p[3] != 'E') { error(WARNING, "invalid note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. 
*/ offset = sizeof(Elf32_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(panic_task_regs); return FALSE; } /* * Accept or reject a symbol from the kernel namelist. */ static int arm_verify_symbol(const char *name, ulong value, char type) { if (STREQ(name, "swapper_pg_dir")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if (STREQ(name, "$a") || STREQ(name, "$n") || STREQ(name, "$d")) return FALSE; if (STREQ(name, "PRRR") || STREQ(name, "NMRR")) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); return TRUE; } static int arm_is_module_addr(ulong vaddr) { ulong modules_start; ulong modules_end = machdep->machspec->modules_end; if (!MODULES_VADDR) { /* * In case we are still initializing, and vm_init() has not been * called, we use defaults here which is 16MB below kernel start * address. */ modules_start = DEFAULT_MODULES_VADDR; } else { modules_start = MODULES_VADDR; } return (vaddr >= modules_start && vaddr <= modules_end); } int arm_is_vmalloc_addr(ulong vaddr) { if (arm_is_module_addr(vaddr)) return TRUE; if (!VMALLOC_START) return FALSE; return (vaddr >= VMALLOC_START); } /* * Check whether given address falls inside kernel address space (including * modules). 
*/
/* Kernel-space test: the module region, or anything at/above kvbase. */
static int
arm_is_kvaddr(ulong vaddr)
{
	if (arm_is_module_addr(vaddr))
		return TRUE;

	return (vaddr >= machdep->kvbase);
}

/* User-space test: complement of arm_is_kvaddr(); "unused" is ignored. */
static int
arm_is_uvaddr(ulong vaddr, struct task_context *unused)
{
	if (arm_is_module_addr(vaddr))
		return FALSE;

	return (vaddr < machdep->kvbase);
}

/*
 * Returns TRUE if given pc is in exception area.
 */
static int
arm_in_exception_text(ulong pc)
{
	ulong exception_start = machdep->machspec->exception_text_start;
	ulong exception_end = machdep->machspec->exception_text_end;

	/* Both bounds stay zero unless the exception-text symbols existed. */
	if (exception_start && exception_end)
		return (pc >= exception_start && pc < exception_end);

	return FALSE;
}

/*
 * Returns TRUE if given pc points to a return from syscall
 * entrypoint. In case the function returns TRUE and if offset is given,
 * it is filled with the offset that should be added to the SP to get
 * address of the exception frame where the user registers are.
 */
static int
arm_in_ret_from_syscall(ulong pc, int *offset)
{
	/*
	 * On fast syscall return path, the stack looks like:
	 *
	 *    SP + 0   {r4, r5}
	 *    SP + 8   user pt_regs
	 *
	 * The asm syscall handler pushes fifth and sixth registers
	 * onto the stack before calling the actual syscall handler.
	 *
	 * So in order to print out the user registers at the time
	 * the syscall was made, we need to adjust SP for 8.
	 */
	if (pc == symbol_value("ret_fast_syscall")) {
		if (offset)
			*offset = 8;
		return TRUE;
	}

	/*
	 * In case we are on the slow syscall path, the SP already
	 * points to the start of the user registers hence no
	 * adjustments needs to be done.
	 */
	if (pc == symbol_value("ret_slow_syscall")) {
		if (offset)
			*offset = 0;
		return TRUE;
	}

	return FALSE;
}

/*
 * Unroll the kernel stack using a minimal amount of gdb services.
 */
static void
arm_back_trace(struct bt_info *bt)
{
	int n = 0;

	/*
	 * In case bt->machdep contains pointer to a full register set, we take
	 * FP from there.
	 */
	if (bt->machdep) {
		const struct arm_pt_regs *regs = bt->machdep;
		bt->frameptr = regs->ARM_fp;
	}

	/*
	 * Stack frame layout:
	 *             optionally saved caller registers (r4 - r10)
	 *             saved fp
	 *             saved sp
	 *             saved lr
	 *    frame => saved pc
	 *             optionally saved arguments (r0 - r3)
	 * saved sp =>
	 *
	 * Functions start with the following code sequence:
	 *                 mov   ip, sp
	 *                 stmfd sp!, {r0 - r3} (optional)
	 * corrected pc => stmfd sp!, {..., fp, ip, lr, pc}
	 */
	while (bt->frameptr && INSTACK(bt->frameptr, bt)) {
		ulong from;
		ulong sp;

		/*
		 * We correct the PC to point to the actual instruction (current
		 * value is PC + 8).
		 */
		bt->instptr = GET_STACK_ULONG(bt->frameptr - 0);
		bt->instptr -= 8;

		/*
		 * Now get LR, saved SP and FP from the frame as well.
		 */
		from = GET_STACK_ULONG(bt->frameptr - 4);
		sp = GET_STACK_ULONG(bt->frameptr - 8);
		bt->frameptr = GET_STACK_ULONG(bt->frameptr - 12);

		arm_dump_backtrace_entry(bt, n++, from, sp);

		bt->stkptr = sp;
	}
}

/*
 * Unroll a kernel stack.
 */
static void
arm_back_trace_cmd(struct bt_info *bt)
{
	if (bt->flags & BT_REGS_NOT_FOUND)
		return;

	/* Prefer DWARF unwind tables when they were successfully loaded. */
	if (kt->flags & DWARF_UNWIND)
		unwind_backtrace(bt);
	else
		arm_back_trace(bt);
}

/*
 * Calculate and return the speed of the processor.
 */
static ulong
arm_processor_speed(void)
{
	/*
	 * For now, we don't support reading CPU speed.
	 */
	return 0;
}

/*
 * Translate a PTE, returning TRUE if the page is present. If a physaddr pointer
 * is passed in, don't print anything.
*/
static int
arm_translate_pte(ulong pte, void *physaddr, ulonglong lpae_pte)
{
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char buf[BUFSIZE];
	int page_present;
	ulonglong paddr;
	int len1, len2, others;

	/* With LPAE the 64-bit lpae_pte argument carries the entry. */
	if (machdep->flags & PAE) {
		paddr = LPAE_PAGEBASE(lpae_pte);
		sprintf(ptebuf, "%llx", lpae_pte);
		pte = (ulong)lpae_pte;
	} else {
		paddr = PAGEBASE(pte);
		sprintf(ptebuf, "%lx", pte);
	}
	page_present = pte_present(pte);

	/* Quiet mode: just hand back the physical address. */
	if (physaddr) {
		if (machdep->flags & PAE)
			*((ulonglong *)physaddr) = paddr;
		else
			*((ulong *)physaddr) = (ulong)paddr;
		return page_present;
	}

	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE"));

	if (!page_present && pte) {
		/* swap page, not handled yet */
		return page_present;
	}

	sprintf(physbuf, "%llx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");
	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER | RJUST, NULL),
		mkstring(physbuf, len2, CENTER | RJUST, NULL));

	fprintf(fp, "(");
	others = 0;

	if (pte) {
		if (pte_present(pte))
			fprintf(fp, "%sPRESENT", others++ ? "|" : "");
		if (pte_dirty(pte))
			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
		if (pte_young(pte))
			fprintf(fp, "%sYOUNG", others++ ? "|" : "");

		/* v2 page tables encode write/exec as inverted rdonly/xn. */
		if (machdep->flags & PGTABLE_V2) {
			if (!pte_rdonly(pte))
				fprintf(fp, "%sWRITE", others++ ? "|" : "");
			if (!pte_xn(pte))
				fprintf(fp, "%sEXEC", others++ ? "|" : "");
		} else {
			if (pte_write(pte))
				fprintf(fp, "%sWRITE", others++ ? "|" : "");
			if (pte_exec(pte))
				fprintf(fp, "%sEXEC", others++ ? "|" : "");
		}
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return 0;
}

/*
 * Virtual to physical memory translation. This function will be called by both
 * arm_kvtop() and arm_uvtop().
*/
static int
arm_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose)
{
	char buf[BUFSIZE];
	ulong *page_dir;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pmd_pte;
	ulong pte;

	/*
	 * Page tables in ARM Linux
	 *
	 * In hardware PGD is 16k (having 4096 pointers to PTE) and PTE is 1k
	 * (containing 256 translations).
	 *
	 * Linux, however, wants to have PTEs as page sized entities. This means
	 * that in ARM Linux we have following setup (see also
	 * arch/arm/include/asm/pgtable.h)
	 *
	 * Before 2.6.38
	 *
	 *     PGD                   PTE
	 *     +---------+
	 *     |         | 0  ---->  +------------+
	 *     +- - - - -+           | h/w pt 0   |
	 *     |         | 4  ---->  +------------+ +1024
	 *     +- - - - -+           | h/w pt 1   |
	 *     .         .           +------------+ +2048
	 *     .         .           | Linux pt 0 |
	 *     .         .           +------------+ +3072
	 *     |         | 4095      | Linux pt 1 |
	 *     +---------+           +------------+ +4096
	 *
	 * Starting from 2.6.38
	 *
	 *     PGD                   PTE
	 *     +---------+
	 *     |         | 0  ---->  +------------+
	 *     +- - - - -+           | Linux pt 0 |
	 *     |         | 4  ---->  +------------+ +1024
	 *     +- - - - -+           | Linux pt 1 |
	 *     .         .           +------------+ +2048
	 *     .         .           | h/w pt 0   |
	 *     .         .           +------------+ +3072
	 *     |         | 4095      | h/w pt 1   |
	 *     +---------+           +------------+ +4096
	 *
	 * So in Linux implementation we have two hardware pointers to second
	 * level page tables. Depending on the kernel version, the "Linux" page
	 * tables either follow or precede the hardware tables.
	 *
	 * Linux PT entries contain bits that are not supported on hardware, for
	 * example "young" and "dirty" flags.
	 *
	 * Our translation scheme only uses Linux PTEs here.
	 */

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/*
	 * pgd_offset(pgd, vaddr)
	 */
	page_dir = pgd + PGD_OFFSET(vaddr) * 2;

	/* The unity-mapped region is mapped using 1MB pages,
	 * hence 1-level translation if bit 20 is set; if we
	 * are 1MB apart physically, we move the page_dir in
	 * case bit 20 is set.
	 */
	if (((vaddr) >> (20)) & 1)
		page_dir = page_dir + 1;

	FILL_PGD(PAGEBASE(pgd), KVADDR, PGDIR_SIZE());
	pgd_pte = ULONG(machdep->pgd + PGDIR_OFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
			MKSTR((ulong)page_dir)), pgd_pte);

	if (!pgd_pte)
		return FALSE;

	/*
	 * pmd_offset(pgd, vaddr)
	 *
	 * Here PMD is folded into a PGD.
	 */
	pmd_pte = pgd_pte;
	page_middle = page_dir;

	if (verbose)
		fprintf(fp, " PMD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
			MKSTR((ulong)page_middle)), pmd_pte);

	/* 1MB section mapping: single-level translation, done here. */
	if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = pmd_pte & _SECTION_PAGE_MASK;

		if (verbose) {
			fprintf(fp, " PAGE: %s (1MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
				MKSTR(sectionbase)));
		}
		*paddr = sectionbase + (vaddr & ~_SECTION_PAGE_MASK);
		return TRUE;
	}

	/*
	 * pte_offset_map(pmd, vaddr)
	 */
	page_table = pmd_page_addr(pmd_pte) + PTE_OFFSET(vaddr);

	FILL_PTBL(PAGEBASE(page_table), PHYSADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

	if (verbose) {
		fprintf(fp, " PTE: %s => %lx\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
			MKSTR((ulong)page_table)), pte);
	}

	if (!pte_present(pte)) {
		if (pte && verbose) {
			fprintf(fp, "\n");
			arm_translate_pte(pte, 0, 0);
		}
		return FALSE;
	}

	*paddr = PAGEBASE(pte) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
			MKSTR(PAGEBASE(pte))));
		arm_translate_pte(pte, 0, 0);
	}

	return TRUE;
}

/*
 * Virtual to physical memory translation when "CONFIG_ARM_LPAE=y".
 * This function will be called by both arm_kvtop() and arm_uvtop().
*/
static int
arm_lpae_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose)
{
	char buf[BUFSIZE];
	physaddr_t page_dir;
	physaddr_t page_middle;
	physaddr_t page_table;
	pgd_t pgd_pmd;
	pmd_t pmd_pte;
	pte_t pte;

	/* Before vmalloc bookkeeping exists, only unity mapping works. */
	if (!vt->vmalloc_start) {
		*paddr = LPAE_VTOP(vaddr);
		return TRUE;
	}

	/* Unity-mapped: answer directly; fall through only when verbose. */
	if (!IS_VMALLOC_ADDR(vaddr)) {
		*paddr = LPAE_VTOP(vaddr);
		if (!verbose)
			return TRUE;
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/*
	 * pgd_offset(pgd, vaddr)
	 */
	page_dir = LPAE_VTOP((ulong)pgd + LPAE_PGD_OFFSET(vaddr) * 8);
	FILL_PGD_LPAE(LPAE_VTOP(pgd), PHYSADDR, LPAE_PGDIR_SIZE());
	pgd_pmd = ULONGLONG(machdep->pgd + LPAE_PGDIR_OFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %8llx => %llx\n",
			(ulonglong)page_dir, pgd_pmd);

	if (!pgd_pmd)
		return FALSE;

	/*
	 * pmd_offset(pgd, vaddr)
	 */
	page_middle = LPAE_PAGEBASE(pgd_pmd) + LPAE_PMD_OFFSET(vaddr) * 8;
	FILL_PMD_LPAE(LPAE_PAGEBASE(pgd_pmd), PHYSADDR, LPAE_PMDIR_SIZE());
	pmd_pte = ULONGLONG(machdep->pmd + LPAE_PMDIR_OFFSET(page_middle));

	if (!pmd_pte)
		return FALSE;

	/* 2MB section mapping: translation completes at the PMD level. */
	if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT_LPAE) {
		ulonglong sectionbase = LPAE_PAGEBASE(pmd_pte) &
			LPAE_SECTION_PAGE_MASK;

		if (verbose)
			fprintf(fp, " PAGE: %8llx (2MB)\n\n",
				(ulonglong)sectionbase);
		*paddr = sectionbase + (vaddr & ~LPAE_SECTION_PAGE_MASK);
		return TRUE;
	}

	/*
	 * pte_offset_map(pmd, vaddr)
	 */
	page_table = LPAE_PAGEBASE(pmd_pte) + PTE_OFFSET(vaddr) * 8;
	FILL_PTBL_LPAE(LPAE_PAGEBASE(pmd_pte), PHYSADDR, LPAE_PTEDIR_SIZE());
	pte = ULONGLONG(machdep->ptbl + LPAE_PTEDIR_OFFSET(page_table));

	if (verbose) {
		fprintf(fp, " PTE: %8llx => %llx\n\n",
			(ulonglong)page_table, pte);
	}

	if (!pte_present(pte)) {
		if (pte && verbose) {
			fprintf(fp, "\n");
			arm_translate_pte(0, 0, pte);
		}
		return FALSE;
	}

	*paddr = LPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX,
			MKSTR(PAGEBASE(pte))));
		arm_translate_pte(0, 0, pte);
	}

	return TRUE;
}

/*
 * Translates a user virtual address to its physical
address. cmd_vtop() sets
 * the verbose flag so that the pte translation gets displayed; all other
 * callers quietly accept the translation.
 */
static int
arm_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;

	if (!tc)
		error(FATAL, "current context invalid\n");

	/*
	 * Before idmap_pgd was introduced with upstream commit 2c8951ab0c
	 * (ARM: idmap: use idmap_pgd when setting up mm for reboot), the
	 * panic task pgd was overwritten by soft reboot code, so we can't do
	 * any vtop translations.
	 */
	if (!(machdep->flags & IDMAP_PGD) && tc->task == tt->panic_task)
		error(FATAL, "panic task pgd is trashed by soft reboot code\n");

	*paddr = 0;

	/* Kernel threads have no mm; borrow the active_mm's pgd instead. */
	if (is_kernel_thread(tc->task) && IS_KVADDR(uvaddr)) {
		ulong active_mm;

		readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR,
			&active_mm, sizeof(void *),
			"task active_mm contents", FAULT_ON_ERROR);

		if (!active_mm)
			error(FATAL, "no active_mm for this kernel thread\n");

		readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR,
			&pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR);
	} else {
		ulong mm;

		/* task_mm() caches the mm_struct in tt->mm_struct. */
		mm = task_mm(tc->task, TRUE);
		if (mm)
			pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR,
				&pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	if (machdep->flags & PAE)
		return arm_lpae_vtop(uvaddr, pgd, paddr, verbose);

	return arm_vtop(uvaddr, pgd, paddr, verbose);
}

/*
 * Translates a kernel virtual address to its physical address. cmd_vtop() sets
 * the verbose flag so that the pte translation gets displayed; all other
 * callers quietly accept the translation.
*/
static int
arm_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (machdep->flags & PAE)
		return arm_lpae_vtop(kvaddr, (ulong *)vt->kernel_pgd[0],
			paddr, verbose);

	/* Before vmalloc bookkeeping exists, only unity mapping works. */
	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	/* Unity-mapped: answer directly; fall through only when verbose. */
	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		if (!verbose)
			return TRUE;
	}

	return arm_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
}

/*
 * Get SP and PC values for idle tasks.
 */
static int
arm_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	const char *cpu_context;

	if (!bt->tc || !(tt->flags & THREAD_INFO))
		return FALSE;

	/*
	 * Update thread_info in tt.
	 */
	if (!fill_thread_info(bt->tc->thread_info))
		return FALSE;

	cpu_context = tt->thread_info + OFFSET(thread_info_cpu_context);

/* Fetch one saved register out of the cpu_context_save area. */
#define GET_REG(ptr, cp, off) ((*ptr) = (*((ulong *)((cp) + OFFSET(off)))))

	/*
	 * Unwinding code needs FP value also so we pass it with bt.
	 */
	GET_REG(&bt->frameptr, cpu_context, cpu_context_save_fp);
	GET_REG(spp, cpu_context, cpu_context_save_sp);
	GET_REG(pcp, cpu_context, cpu_context_save_pc);

	return TRUE;
}

/*
 * Get the starting point for the active cpu in a diskdump.
 */
static int
arm_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp)
{
	const struct machine_specific *ms = machdep->machspec;

	if (!ms->crash_task_regs ||
	    (!ms->crash_task_regs[bt->tc->processor].ARM_pc &&
	     !ms->crash_task_regs[bt->tc->processor].ARM_sp)) {
		bt->flags |= BT_REGS_NOT_FOUND;
		return FALSE;
	}

	/*
	 * We got registers for panic task from crash_notes. Just return them.
	 */
	*nip = ms->crash_task_regs[bt->tc->processor].ARM_pc;
	*ksp = ms->crash_task_regs[bt->tc->processor].ARM_sp;

	/*
	 * Also store pointer to all registers in case unwinding code needs
	 * to access LR.
	 */
	bt->machdep = &(ms->crash_task_regs[bt->tc->processor]);

	return TRUE;
}

/*
 * Get a stack frame combination of PC and SP from the most relevant spot.
*/ static void arm_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { ulong ip, sp; int ret; ip = sp = 0; bt->machdep = NULL; if (DUMPFILE() && is_task_active(bt->task)) ret = arm_get_dumpfile_stack_frame(bt, &ip, &sp); else ret = arm_get_frame(bt, &ip, &sp); if (!ret) error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task); if (pcp) *pcp = ip; if (spp) *spp = sp; } /* * Prints out exception stack starting from start. */ void arm_dump_exception_stack(ulong start, ulong end) { struct arm_pt_regs regs; ulong flags; char buf[64]; if (!readmem(start, KVADDR, ®s, sizeof(regs), "exception regs", RETURN_ON_ERROR)) { error(WARNING, "failed to read exception registers\n"); return; } fprintf(fp, " pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" " sp : %08lx ip : %08lx fp : %08lx\n", regs.ARM_pc, regs.ARM_lr, regs.ARM_cpsr, regs.ARM_sp, regs.ARM_ip, regs.ARM_fp); fprintf(fp, " r10: %08lx r9 : %08lx r8 : %08lx\n", regs.ARM_r10, regs.ARM_r9, regs.ARM_r8); fprintf(fp, " r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs.ARM_r7, regs.ARM_r6, regs.ARM_r5, regs.ARM_r4); fprintf(fp, " r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs.ARM_r3, regs.ARM_r2, regs.ARM_r1, regs.ARM_r0); flags = regs.ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; buf[4] = '\0'; fprintf(fp, " Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s\n", buf, interrupts_enabled(®s) ? "n" : "ff", fast_interrupts_enabled(®s) ? 
"n" : "ff", processor_modes[processor_mode(®s)], isa_modes[isa_mode(®s)]); } static void arm_display_full_frame(struct bt_info *bt, ulong sp) { ulong words, addr; ulong *up; char buf[BUFSIZE]; int i, u_idx; if (!INSTACK(sp, bt) || !INSTACK(bt->stkptr, bt)) return; words = (sp - bt->stkptr) / sizeof(ulong); if (words == 0) { fprintf(fp, " (no frame)\n"); return; } addr = bt->stkptr; u_idx = (bt->stkptr - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if ((i % 4) == 0) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx * sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } /* * Prints out a single stack frame. What is printed depends on flags passed in * with bt. * * What is expected when calling this function: * bt->frameptr = current FP (or 0 if there is no such) * bt->stkptr = current SP * bt->instptr = current PC * * from = LR * sp = previous/saved SP */ void arm_dump_backtrace_entry(struct bt_info *bt, int level, ulong from, ulong sp) { struct load_module *lm; const char *name; int offset = 0; struct syment *symp; ulong symbol_offset; char *name_plus_offset; char buf[BUFSIZE]; name = closest_symbol(bt->instptr); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { symp = value_search(bt->instptr, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(bt->instptr, buf, bt->radix); } if (module_symbol(bt->instptr, NULL, &lm, NULL, 0)) { fprintf(fp, "%s#%d [<%08lx>] (%s [%s]) from [<%08lx>]\n", level < 10 ? " " : "", level, bt->instptr, name_plus_offset ? name_plus_offset : name, lm->mod_name, from); } else { fprintf(fp, "%s#%d [<%08lx>] (%s) from [<%08lx>]\n", level < 10 ? " " : "", level, bt->instptr, name_plus_offset ? 
name_plus_offset : name, from); } if (bt->flags & BT_LINE_NUMBERS) { char buf[BUFSIZE]; get_line_number(bt->instptr, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (arm_in_exception_text(bt->instptr)) { arm_dump_exception_stack(sp, sp + sizeof(struct arm_pt_regs)); } else if (arm_in_ret_from_syscall(from, &offset)) { ulong nsp = sp + offset; arm_dump_exception_stack(nsp, nsp + sizeof(struct arm_pt_regs)); } if (bt->flags & BT_FULL) { if (kt->flags & DWARF_UNWIND) { fprintf(fp, " " "[PC: %08lx LR: %08lx SP: %08lx SIZE: %ld]\n", bt->instptr, from, bt->stkptr, sp - bt->stkptr); } else { fprintf(fp, " " "[PC: %08lx LR: %08lx SP: %08lx FP: %08lx " "SIZE: %ld]\n", bt->instptr, from, bt->stkptr, bt->frameptr, sp - bt->stkptr); } arm_display_full_frame(bt, sp); } } /* * Determine where vmalloc'd memory starts. */ static ulong arm_vmalloc_start(void) { machdep->machspec->vmalloc_start_addr = vt->high_memory; return vt->high_memory; } /* * Checks whether given task is valid task address. */ static int arm_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } /* * Filter dissassembly output if the output radix is not gdb's default 10 */ static int arm_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on arm) so this routine both fixes the * references as well as imposing the current output radix on the translations. 
*/ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Look for likely exception frames in a stack. */ static int arm_eframe_search(struct bt_info *bt) { return (NOT_IMPLEMENTED()); } /* * Get the relevant page directory pointer from a task structure. */ static ulong arm_get_task_pgd(ulong task) { return (NOT_IMPLEMENTED()); } /* * Machine dependent command. */ static void arm_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != -1) { switch (c) { case 'c': case 'm': fprintf(fp, "ARM: '-%c' option is not supported\n", c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); arm_display_machine_stats(); } static void arm_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", 
STACKSIZE()); } static int arm_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_present())) return cpus; else return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Initialize ARM specific stuff. */ static void arm_init_machspec(void) { struct machine_specific *ms = machdep->machspec; ulong phys_base; if (symbol_exists("__exception_text_start") && symbol_exists("__exception_text_end")) { ms->exception_text_start = symbol_value("__exception_text_start"); ms->exception_text_end = symbol_value("__exception_text_end"); } if (symbol_exists("_stext") && symbol_exists("_etext")) { ms->kernel_text_start = symbol_value("_stext"); ms->kernel_text_end = symbol_value("_etext"); } if (CRASHDEBUG(1)) { fprintf(fp, "kernel text: [%lx - %lx]\n", ms->kernel_text_start, ms->kernel_text_end); fprintf(fp, "exception text: [%lx - %lx]\n", ms->exception_text_start, ms->exception_text_end); } if (machdep->flags & PHYS_BASE) /* --machdep override */ return; /* * Next determine suitable value for phys_base. User can override this * by passing valid '--machdep phys_base=' option. */ ms->phys_base = 0; if (ACTIVE()) { char buf[BUFSIZE]; char *p1; int errflag; FILE *fp; if ((fp = fopen("/proc/iomem", "r")) == NULL) return; /* * Memory regions are sorted in ascending order. We take the * first region which should be correct for most uses. */ errflag = 1; while (fgets(buf, BUFSIZE, fp)) { if (strstr(buf, ": System RAM")) { clean_line(buf); errflag = 0; break; } } fclose(fp); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; *p1 = NULLCHAR; phys_base = htol(buf, RETURN_ON_ERROR | QUIET, &errflag); if (errflag) return; ms->phys_base = phys_base; } else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_base)) { ms->phys_base = phys_base; } else if (KDUMP_DUMPFILE() && arm_kdump_phys_base(&phys_base)) { ms->phys_base = phys_base; } else { error(WARNING, "phys_base cannot be determined from the dumpfile.\n" "Using default value of 0. 
If this is not correct,\n" "consider using '--machdep phys_base='\n"); } if (CRASHDEBUG(1)) fprintf(fp, "using %lx as phys_base\n", ms->phys_base); } static const char *hook_files[] = { "arch/arm/kernel/entry-armv.S", "arch/arm/kernel/entry-common.S", }; #define ENTRY_ARMV_S ((char **)&hook_files[0]) #define ENTRY_COMMON_S ((char **)&hook_files[1]) static struct line_number_hook arm_line_number_hooks[] = { { "__dabt_svc", ENTRY_ARMV_S }, { "__irq_svc", ENTRY_ARMV_S }, { "__und_svc", ENTRY_ARMV_S }, { "__pabt_svc", ENTRY_ARMV_S }, { "__switch_to", ENTRY_ARMV_S }, { "ret_fast_syscall", ENTRY_COMMON_S }, { "ret_slow_syscall", ENTRY_COMMON_S }, { "ret_from_fork", ENTRY_COMMON_S }, { NULL, NULL }, }; #endif /* ARM */ crash-7.1.4/qemu-load.h0000664000000000000000000001167712634305150013373 0ustar rootroot/* * Qemu save VM file description * * Copyright (C) 2009 Red Hat, Inc. * Written by Paolo Bonzini. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifndef QEMU_LOAD_H #define QEMU_LOAD_H 1 #include #include #include #include enum qemu_save_section { QEMU_VM_EOF, QEMU_VM_SECTION_START, QEMU_VM_SECTION_PART, QEMU_VM_SECTION_END, QEMU_VM_SECTION_FULL, QEMU_VM_SUBSECTION }; enum qemu_features { QEMU_FEATURE_RAM = 1, QEMU_FEATURE_CPU = 2, QEMU_FEATURE_TIMER = 4, QEMU_FEATURE_KVM = 8 }; struct qemu_device_list { struct qemu_device *head, *tail; uint32_t features; }; struct qemu_device_loader { const char *name; struct qemu_device *(*init_load) (struct qemu_device_list *, uint32_t, uint32_t, uint32_t, bool, FILE *); }; struct qemu_device_vtbl { const char *name; uint32_t (*load) (struct qemu_device *, FILE *, enum qemu_save_section); void (*free) (struct qemu_device *, struct qemu_device_list *); }; struct qemu_device { struct qemu_device_vtbl *vtbl; struct qemu_device_list *list; struct qemu_device *next; struct qemu_device *prev; uint32_t section_id; uint32_t instance_id; uint32_t version_id; }; struct qemu_device_ram { struct qemu_device dev_base; uint64_t last_ram_offset; FILE *fp; off_t *offsets; }; union qemu_uint128_t { uint32_t i[4]; unsigned i128 __attribute__ ((vector_size (16))); }; struct qemu_x86_seg { uint64_t base; uint32_t selector; uint32_t limit; uint32_t flags; }; struct qemu_x86_sysenter { uint32_t cs; uint64_t esp; uint64_t eip; }; union qemu_fpu_reg { long double ld; char bytes[10]; uint64_t mmx; }; struct qemu_x86_vmtrr { uint64_t base; uint64_t mask; }; struct qemu_x86_svm { uint64_t hsave; uint64_t vmcb; uint64_t tsc_offset; uint8_t in_vmm : 1; uint8_t guest_if_mask : 1; uint8_t guest_intr_masking : 1; uint16_t cr_read_mask; uint16_t cr_write_mask; uint16_t dr_read_mask; uint16_t dr_write_mask; uint32_t exception_intercept_mask; uint64_t intercept_mask; }; struct qemu_x86_kvm { uint64_t int_bitmap[4]; uint64_t tsc; uint32_t mp_state; uint32_t exception_injected; uint8_t soft_interrupt; uint8_t nmi_injected; uint8_t nmi_pending; uint8_t has_error_code; uint32_t sipi_vector; uint64_t 
system_time_msr; uint64_t wall_clock_msr; }; struct qemu_x86_mce { uint64_t mcg_cap; uint64_t mcg_status; uint64_t mcg_ctl; uint64_t mce_banks[10 * 4]; }; struct qemu_device_x86 { struct qemu_device dev_base; uint32_t halted; uint32_t irq; uint64_t regs[16]; uint64_t eip; uint64_t eflags; uint16_t fpucw; uint16_t fpusw; uint16_t fpu_free; union qemu_fpu_reg st[8]; struct qemu_x86_seg cs; struct qemu_x86_seg ds; struct qemu_x86_seg es; struct qemu_x86_seg ss; struct qemu_x86_seg fs; struct qemu_x86_seg gs; struct qemu_x86_seg ldt; struct qemu_x86_seg tr; struct qemu_x86_seg gdt; struct qemu_x86_seg idt; struct qemu_x86_sysenter sysenter; uint64_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t dr[8]; uint8_t cr8; uint8_t soft_mmu : 1; uint8_t smm : 1; uint8_t a20_masked : 1; uint8_t global_if : 1; uint8_t in_nmi : 1; uint32_t mxcsr; union qemu_uint128_t xmm[16]; uint64_t efer; uint64_t star; uint64_t lstar; uint64_t cstar; uint64_t fmask; uint64_t kernel_gs_base; uint64_t pat; uint32_t smbase; struct qemu_x86_svm svm; uint64_t fixed_mtrr[11]; uint64_t deftype_mtrr; struct qemu_x86_vmtrr variable_mtrr[8]; struct qemu_x86_kvm kvm; struct qemu_x86_mce mce; uint64_t tsc_aux; uint64_t xcr0; uint64_t xstate_bv; union qemu_uint128_t ymmh_regs[16]; }; struct qemu_timer { uint64_t cpu_ticks_offset; uint64_t ticks_per_sec; uint64_t cpu_clock_offset; }; struct qemu_device *device_alloc (struct qemu_device_list *, size_t, struct qemu_device_vtbl *, uint32_t, uint32_t, uint32_t); void device_free (struct qemu_device *); void device_list_free (struct qemu_device_list *); struct qemu_device *device_find (struct qemu_device_list *, uint32_t); struct qemu_device *device_find_instance (struct qemu_device_list *, const char *, uint32_t); struct qemu_device_list *qemu_load (const struct qemu_device_loader *, uint32_t, FILE *); int ram_read_phys_page (struct qemu_device_ram *, void *, uint64_t); /* For a 32-bit KVM host. 
*/ extern const struct qemu_device_loader devices_x86_32[]; /* For a 64-bit KVM host. */ extern const struct qemu_device_loader devices_x86_64[]; #endif crash-7.1.4/rse.h0000664000000000000000000000461512634305150012272 0ustar rootroot#ifndef _ASM_IA64_RSE_H #define _ASM_IA64_RSE_H /* * Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 David Mosberger-Tang */ /* * rse.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * include/asm-ia64/rse.h (2.4.9-e.3) */ /* * Register stack engine related helper functions. This file may be * used in applications, so be careful about the name-space and give * some consideration to non-GNU C compilers (though __inline__ is * fine). */ static __inline__ unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Return TRUE if ADDR is the address of an RNAT slot. */ static __inline__ unsigned long ia64_rse_is_rnat_slot (unsigned long *addr) { return ia64_rse_slot_num(addr) == 0x3f; } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. */ static __inline__ unsigned long * ia64_rse_rnat_addr (unsigned long *slot_addr) { return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3)); } /* * Calcuate the number of registers in the dirty partition starting at * BSPSTORE with a size of DIRTY bytes. 
This isn't simply DIRTY * divided by eight because the 64th slot is used to store ar.rnat. */ static __inline__ unsigned long ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp) { unsigned long slots = (bsp - bspstore); return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40; } /* * The inverse of the above: given bspstore and the number of * registers, calculate ar.bsp. */ static __inline__ unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (num_regs < 0) delta -= 0x3e; return addr + num_regs + delta/0x3f; } #endif /* _ASM_IA64_RSE_H */ crash-7.1.4/x86.c0000775000000000000000000047446412634305150012141 0ustar rootroot/* x86.c - core analysis suite * * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2014 David Anderson * Copyright (C) 2002-2014 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef X86 /* * NOTICE OF APPRECIATION * * The stack-trace related code in this file is an extension of the stack * trace code from the Mach in-kernel debugger "ddb". Sincere thanks to * the author(s). * */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. 
* * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #include "defs.h" #include "xen_hyper_defs.h" #ifndef MCLX #include #include #include #include #include #include #include #include #include #include /* * Machine register set. 
*/ struct db_variable db_regs[] = { "cs", &ddb_regs.tf_cs, FCN_NULL, "ds", &ddb_regs.tf_ds, FCN_NULL, "es", &ddb_regs.tf_es, FCN_NULL, #if 0 "fs", &ddb_regs.tf_fs, FCN_NULL, "gs", &ddb_regs.tf_gs, FCN_NULL, #endif "ss", &ddb_regs.tf_ss, FCN_NULL, "eax", &ddb_regs.tf_eax, FCN_NULL, "ecx", &ddb_regs.tf_ecx, FCN_NULL, "edx", &ddb_regs.tf_edx, FCN_NULL, "ebx", &ddb_regs.tf_ebx, FCN_NULL, "esp", &ddb_regs.tf_esp, FCN_NULL, "ebp", &ddb_regs.tf_ebp, FCN_NULL, "esi", &ddb_regs.tf_esi, FCN_NULL, "edi", &ddb_regs.tf_edi, FCN_NULL, "eip", &ddb_regs.tf_eip, FCN_NULL, "efl", &ddb_regs.tf_eflags, FCN_NULL, }; struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); #else typedef int db_strategy_t; /* search strategy */ #define DB_STGY_ANY 0 /* anything goes */ #define DB_STGY_XTRN 1 /* only external symbols */ #define DB_STGY_PROC 2 /* only procedures */ typedef ulong db_addr_t; /* address - unsigned */ typedef int db_expr_t; /* expression - signed */ /* * Symbol representation is specific to the symtab style: * BSD compilers use dbx' nlist, other compilers might use * a different one */ typedef char * db_sym_t; /* opaque handle on symbols */ #define DB_SYM_NULL ((db_sym_t)0) typedef uint boolean_t; #endif /* !MCLX */ /* * Stack trace. 
*/ #ifdef MCLX static db_expr_t db_get_value(db_addr_t, int, boolean_t, struct bt_info *); #define INKERNEL(va) (machdep->kvtop(CURRENT_CONTEXT(), va, &phys, 0)) #else #define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK) #endif struct i386_frame { struct i386_frame *f_frame; int f_retaddr; int f_arg0; }; #ifdef MCLX #define NORMAL 0 #define IDT_DIRECT_ENTRY 1 #define IDT_JMP_ERROR_CODE 2 #define RET_FROM_INTR 3 #define SIGNAL_RETURN 4 #else #define NORMAL 0 #define TRAP 1 #define INTERRUPT 2 #define SYSCALL 3 #endif #ifndef MCLX typedef vm_offset_t db_addr_t; #endif #ifdef MCLX struct eframe { int eframe_found; int eframe_type; ulong eframe_addr; ulong jmp_error_code_eip; }; static void db_nextframe(struct i386_frame **, db_addr_t *, struct eframe *, struct bt_info *); static int dump_eframe(struct eframe *, int, struct bt_info *); static int eframe_numargs(ulong eip, struct bt_info *); static int check_for_eframe(char *, struct bt_info *); static void x86_user_eframe(struct bt_info *); static ulong x86_next_eframe(ulong addr, struct bt_info *bt); static void x86_cmd_mach(void); static int x86_get_smp_cpus(void); static void x86_display_machine_stats(void); static void x86_display_cpu_data(unsigned int); static void x86_display_memmap(void); static int x86_omit_frame_pointer(void); static void x86_back_trace_cmd(struct bt_info *); static int is_rodata_text(ulong); static int mach_CRASHDEBUG(ulong); static db_sym_t db_search_symbol(db_addr_t, db_strategy_t,db_expr_t *); static void db_symbol_values(db_sym_t, char **, db_expr_t *); static int db_sym_numargs(db_sym_t, int *, char **); static void x86_dump_line_number(ulong); static void x86_clear_machdep_cache(void); static ulong mach_debug = 0; static int mach_CRASHDEBUG(ulong dval) { if (CRASHDEBUG(dval)) return TRUE; return (mach_debug >= dval); } #else static void db_nextframe(struct i386_frame **, db_addr_t *); #endif #ifdef MCLX static int db_numargs(struct i386_frame *, struct bt_info *bt); static void 
db_print_stack_entry(char *, int, char **, int *, db_addr_t, struct bt_info *, struct eframe *, int, struct i386_frame *); #else static void db_print_stack_entry (char *, int, char **, int *, db_addr_t); #endif /* * Figure out how many arguments were passed into the frame at "fp". */ static int db_numargs(fp, bt) struct i386_frame *fp; struct bt_info *bt; { int *argp; int inst; int args; argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE, bt); /* * etext is wrong for LKMs. We should attempt to interpret * the instruction at the return address in all cases. This * may require better fault handling. */ #ifdef MCLX if (!is_kernel_text((ulong)argp)) { #else if (argp < (int *)btext || argp >= (int *)etext) { #endif args = 5; } else { inst = db_get_value((int)argp, 4, FALSE, bt); if ((inst & 0xff) == 0x59) /* popl %ecx */ args = 1; else if ((inst & 0xffff) == 0xc483) /* addl $Ibs, %esp */ args = ((inst >> 16) & 0xff) / 4; else args = 5; } return (args); } #ifdef MCLX static int eframe_numargs(ulong eip, struct bt_info *bt) { int inst; int args; if (!is_kernel_text(eip)) args = 5; else { inst = db_get_value((int)eip, 4, FALSE, bt); if ((inst & 0xff) == 0x59) /* popl %ecx */ args = 1; else if ((inst & 0xffff) == 0xc483) /* addl $Ibs, %esp */ args = ((inst >> 16) & 0xff) / 4; else args = 5; } return args; } #endif static void #ifdef MCLX db_print_stack_entry(name, narg, argnp, argp, callpc, bt, ep, fnum, frame) #else db_print_stack_entry(name, narg, argnp, argp, callpc) #endif char *name; int narg; char **argnp; int *argp; db_addr_t callpc; #ifdef MCLX struct bt_info *bt; struct eframe *ep; int fnum; struct i386_frame *frame; #endif { #ifdef MCLX int i; db_expr_t arg; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char *sp; if (!name) { if (IS_MODULE_VADDR(callpc) && module_symbol(callpc, NULL, NULL, buf1, *gdb_output_radix)) { sprintf(buf2, "(%s)", buf1); name = buf2; } else name = "(unknown module)"; } if (strstr(name, "_MODULE_START_")) { sprintf(buf3, 
"(%s module)", name + strlen("_MODULE_START_")); name = buf3; } if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (ep->eframe_found && ep->jmp_error_code_eip) { if (STREQ(closest_symbol(ep->jmp_error_code_eip), bt->ref->str) || STREQ(closest_symbol(callpc), bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } else if (STREQ(name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (ep->eframe_found && ep->jmp_error_code_eip && (bt->ref->hexval == ep->jmp_error_code_eip)) bt->ref->cmdflags |= BT_REF_FOUND; else if (bt->ref->hexval == callpc) bt->ref->cmdflags |= BT_REF_FOUND; break; } return; } else { fprintf(fp, "%s#%d [%08lx] ", fnum < 10 ? " " : "", fnum, (ulong)frame); if (ep->eframe_found && ep->jmp_error_code_eip) fprintf(fp, "%s (via %s)", closest_symbol(callpc), closest_symbol(ep->jmp_error_code_eip)); else fprintf(fp, "%s", name); fprintf(fp, " at %lx\n", callpc); } if (ep->eframe_found) goto done_entry; if (STREQ(name, "L6")) goto done_entry; fprintf(fp, " ("); if ((i = get_function_numargs(callpc)) >= 0) narg = i; while (narg) { if (argnp) fprintf(fp, "%s=", *argnp++); arg = db_get_value((int)argp, 4, FALSE, bt); if ((sp = value_symbol(arg))) fprintf(fp, "%s", sp); else if ((bt->flags & BT_SYMBOLIC_ARGS) && strlen(value_to_symstr(arg, buf1, 0))) fprintf(fp, "%s", buf1); else fprintf(fp, "%x", arg); argp++; if (--narg != 0) fprintf(fp, ", "); } if (i == 0) fprintf(fp, "void"); fprintf(fp, ")\n"); done_entry: if (bt->flags & BT_LINE_NUMBERS) x86_dump_line_number(callpc); return; #else db_printf("%s(", name); while (narg) { if (argnp) db_printf("%s=", *argnp++); db_printf("%r", db_get_value((int)argp, 4, FALSE, bt)); argp++; if (--narg != 0) db_printf(","); } db_printf(") at "); db_printsym(callpc, DB_STGY_PROC); db_printf("\n"); return; #endif } #ifdef MCLX static db_sym_t db_search_symbol(db_addr_t val, db_strategy_t strategy, db_expr_t *offp) { struct syment *sp; 
ulong offset; if ((sp = value_search(val, &offset))) { *offp = (db_expr_t)offset; return(sp->name); } else return DB_SYM_NULL; } /* * Return name and value of a symbol */ static void db_symbol_values(db_sym_t sym, char **namep, db_expr_t *valuep) { struct syment *sp; if (sym == DB_SYM_NULL) { *namep = 0; return; } if ((sp = symbol_search(sym)) == NULL) { error(INFO, "db_symbol_values: cannot find symbol: %s\n", sym); *namep = 0; return; } *namep = sp->name; if (valuep) *valuep = sp->value; #ifndef MCLX X_db_symbol_values(db_last_symtab, sym, namep, &value); if (db_symbol_is_ambiguous(sym)) *namep = db_qualify(sym, db_last_symtab->name); if (valuep) *valuep = value; #endif } static unsigned db_extend[] = { /* table for sign-extending */ 0, 0xFFFFFF80U, 0xFFFF8000U, 0xFF800000U }; static db_expr_t db_get_value(addr, size, is_signed, bt) db_addr_t addr; int size; boolean_t is_signed; struct bt_info * bt; { char data[sizeof(int)]; db_expr_t value; int i; #ifndef MCLX db_read_bytes(addr, size, data); #else BZERO(data, sizeof(int)); if (INSTACK(addr, bt)) { if (size == sizeof(ulong)) return (db_expr_t)GET_STACK_ULONG(addr); else GET_STACK_DATA(addr, data, size); } else { if ((size == sizeof(int)) && text_value_cache(addr, 0, (uint32_t *)&value)) return value; if (!readmem(addr, KVADDR, &value, size, "db_get_value", RETURN_ON_ERROR)) error(FATAL, "db_get_value: read error: address: %lx\n", addr); if (size == sizeof(int)) text_value_cache(addr, value, NULL); } #endif value = 0; #if BYTE_MSF for (i = 0; i < size; i++) #else /* BYTE_LSF */ for (i = size - 1; i >= 0; i--) #endif { value = (value << 8) + (data[i] & 0xFF); } if (size < 4) { if (is_signed && (value & db_extend[size]) != 0) value |= db_extend[size]; } return (value); } static int db_sym_numargs(db_sym_t sym, int *nargp, char **argnames) { return FALSE; } #endif /* * Figure out the next frame up in the call stack. 
*/ #ifdef MCLX static void db_nextframe(fp, ip, ep, bt) struct i386_frame **fp; /* in/out */ db_addr_t *ip; /* out */ struct eframe *ep; struct bt_info *bt; #else static void db_nextframe(fp, ip) struct i386_frame **fp; /* in/out */ db_addr_t *ip; /* out */ #endif { int eip, ebp; db_expr_t offset; char *sym, *name; #ifdef MCLX static int last_ebp; static int last_eip; struct syment *sp; #endif eip = db_get_value((int) &(*fp)->f_retaddr, 4, FALSE, bt); ebp = db_get_value((int) &(*fp)->f_frame, 4, FALSE, bt); /* * Figure out frame type, presuming normal. */ BZERO(ep, sizeof(struct eframe)); ep->eframe_type = NORMAL; sym = db_search_symbol(eip, DB_STGY_ANY, &offset); db_symbol_values(sym, &name, NULL); if (name != NULL) { ep->eframe_type = check_for_eframe(name, bt); #ifndef MCLX if (!strcmp(name, "calltrap")) { frame_type = TRAP; } else if (!strncmp(name, "Xresume", 7)) { frame_type = INTERRUPT; } else if (!strcmp(name, "_Xsyscall")) { frame_type = SYSCALL; } #endif } switch (ep->eframe_type) { case NORMAL: ep->eframe_found = FALSE; break; case IDT_DIRECT_ENTRY: case RET_FROM_INTR: case SIGNAL_RETURN: ep->eframe_found = TRUE; ep->eframe_addr = x86_next_eframe(last_ebp + sizeof(ulong)*2, bt); break; case IDT_JMP_ERROR_CODE: ep->eframe_found = TRUE; ep->eframe_addr = x86_next_eframe(last_ebp + sizeof(ulong) * 4, bt); if ((sp = x86_jmp_error_code(last_eip))) ep->jmp_error_code_eip = sp->value; break; default: error(FATAL, "unknown exception frame type?\n"); } *ip = (db_addr_t) eip; *fp = (struct i386_frame *) ebp; last_ebp = ebp; last_eip = eip; return; #ifndef MCLX db_print_stack_entry(name, 0, 0, 0, eip); /* * Point to base of trapframe which is just above the * current frame. */ tf = (struct trapframe *) ((int)*fp + 8); esp = (ISPL(tf->tf_cs) == SEL_UPL) ? 
tf->tf_esp : (int)&tf->tf_esp; switch (frame_type) { case TRAP: if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- trap %#r, eip = %#r, esp = %#r, ebp = %#r ---\n", tf->tf_trapno, eip, esp, ebp); } break; case SYSCALL: if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- syscall %#r, eip = %#r, esp = %#r, ebp = %#r ---\n", tf->tf_eax, eip, esp, ebp); } break; case INTERRUPT: tf = (struct trapframe *)((int)*fp + 16); if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- interrupt, eip = %#r, esp = %#r, ebp = %#r ---\n", eip, esp, ebp); } break; default: break; } *ip = (db_addr_t) eip; *fp = (struct i386_frame *) ebp; #endif } #ifdef MCLX void x86_back_trace_cmd(struct bt_info *bt) #else ulong db_stack_trace_cmd(addr, have_addr, count, modif, task, flags) db_expr_t addr; boolean_t have_addr; db_expr_t count; char *modif; ulong task; ulong flags; #endif /* MCLX */ { struct i386_frame *frame; int *argp; db_addr_t callpc; boolean_t first; #ifdef MCLX db_expr_t addr; boolean_t have_addr; db_expr_t count; char *modif; db_addr_t last_callpc; ulong lastframe; physaddr_t phys; int frame_number; int forced; struct eframe eframe, *ep; char dbuf[BUFSIZE]; if (!(bt->flags & BT_USER_SPACE) && (!bt->stkptr || !accessible(bt->stkptr))) { error(INFO, "cannot determine starting stack pointer\n"); if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, fp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, fp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, fp); return; } if (bt->flags & BT_USER_SPACE) { if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, fp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, fp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, fp); fprintf(fp, " #0 [user space]\n"); return; } else if ((bt->flags & BT_KERNEL_SPACE)) { if 
(KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, fp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, fp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, fp); } addr = bt->stkptr; have_addr = TRUE; count = 50; modif = (char *)bt->instptr; mach_debug = bt->debug; if ((machdep->flags & OMIT_FRAME_PTR) || bt->debug || (bt->flags & BT_FRAMESIZE_DEBUG) || !(bt->flags & BT_OLD_BACK_TRACE)) { bt->flags &= ~BT_OLD_BACK_TRACE; lkcd_x86_back_trace(bt, 0, fp); return; } if (mach_CRASHDEBUG(2)) { fprintf(fp, "--> stkptr: %lx instptr: %lx (%s)\n", bt->stkptr, bt->instptr, closest_symbol(bt->instptr)); } #endif if (count == -1) count = 65535; if (!have_addr) { #ifndef MCLX frame = (struct i386_frame *)ddb_regs.tf_ebp; if (frame == NULL) frame = (struct i386_frame *)(ddb_regs.tf_esp - 4); callpc = (db_addr_t)ddb_regs.tf_eip; #endif } else { frame = (struct i386_frame *)addr; lastframe = (ulong)frame; ep = &eframe; BZERO(ep, sizeof(struct eframe)); ep->eframe_found = FALSE; callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr, 4, FALSE, bt); if (modif) { frame_number = 0; forced = TRUE; callpc = (db_addr_t)modif; } else { frame_number = 1; forced = FALSE; if (!is_kernel_text(callpc)) error(INFO, "callpc from stack is not a text address\n"); } } first = TRUE; while (count--) { struct i386_frame *actframe; int narg; char * name; db_expr_t offset; db_sym_t sym; #define MAXNARG 16 char *argnames[MAXNARG], **argnp = NULL; sym = db_search_symbol(callpc, DB_STGY_ANY, &offset); db_symbol_values(sym, &name, NULL); /* * Attempt to determine a (possibly fake) frame that gives * the caller's pc. It may differ from `frame' if the * current function never sets up a standard frame or hasn't * set one up yet or has just discarded one. The last two * cases can be guessed fairly reliably for code generated * by gcc. 
The first case is too much trouble to handle in * general because the amount of junk on the stack depends * on the pc (the special handling of "calltrap", etc. in * db_nextframe() works because the `next' pc is special). */ actframe = frame; if (first && !have_addr) { #ifdef MCLX error(FATAL, "cannot handle \"!have_addr\" path #2\n"); #else int instr; instr = db_get_value(callpc, 4, FALSE); if ((instr & 0x00ffffff) == 0x00e58955) { /* pushl %ebp; movl %esp, %ebp */ actframe = (struct i386_frame *) (ddb_regs.tf_esp - 4); } else if ((instr & 0x0000ffff) == 0x0000e589) { /* movl %esp, %ebp */ actframe = (struct i386_frame *) ddb_regs.tf_esp; if (ddb_regs.tf_ebp == 0) { /* Fake the caller's frame better. */ frame = actframe; } } else if ((instr & 0x000000ff) == 0x000000c3) { /* ret */ actframe = (struct i386_frame *) (ddb_regs.tf_esp - 4); } else if (offset == 0) { /* Probably a symbol in assembler code. */ actframe = (struct i386_frame *) (ddb_regs.tf_esp - 4); } #endif } first = FALSE; argp = &actframe->f_arg0; narg = MAXNARG; if (sym != NULL && db_sym_numargs(sym, &narg, argnames)) { argnp = argnames; } else { narg = db_numargs(frame, bt); } #ifdef MCLX if (is_kernel_text(callpc) || IS_MODULE_VADDR(callpc)) { if (mach_CRASHDEBUG(2)) fprintf(fp, "--> (1) lastframe: %lx => frame: %lx\n", lastframe, (ulong)frame); db_print_stack_entry(name, narg, argnp, argp, callpc, bt, ep, frame_number++, frame); if (STREQ(closest_symbol(callpc), "start_secondary")) break; if (BT_REFERENCE_FOUND(bt)) return; if ((ulong)frame < lastframe) { break; } if (INSTACK(frame, bt) && ((ulong)frame > lastframe)) lastframe = (ulong)frame; } else { if (!(forced && frame_number == 1)) { if (is_kernel_data(callpc)) { if (mach_CRASHDEBUG(2)) fprintf(fp, "--> break(1): callpc %lx is data?\n", callpc); if (!is_rodata_text(callpc)) break; } if (mach_CRASHDEBUG(2)) fprintf(fp, "--> (2) lastframe: %lx => frame: %lx\n", lastframe, (ulong)frame); db_print_stack_entry(name, narg, argnp, argp, callpc, bt, 
ep, frame_number++, frame); if (BT_REFERENCE_FOUND(bt)) return; if ((ulong)frame < lastframe) { break; } if (INSTACK(frame, bt) && ((ulong)frame > lastframe)) lastframe = (ulong)frame; } } if (!INSTACK(frame, bt)) { if (mach_CRASHDEBUG(2)) fprintf(fp, "--> break: !INSTACK(frame: %lx, task: %lx)\n", (ulong)frame, bt->task); break; } #else db_print_stack_entry(name, narg, argnp, argp, callpc); #endif if (actframe != frame) { /* `frame' belongs to caller. */ callpc = (db_addr_t) db_get_value((int)&actframe->f_retaddr, 4, FALSE, bt); continue; } if (ep->eframe_found) frame_number = dump_eframe(ep, frame_number, bt); last_callpc = callpc; skip_frame: db_nextframe(&frame, &callpc, ep, bt); if (mach_CRASHDEBUG(2)) { fprintf(fp, "--> db_nextframe: frame: %lx callpc: %lx [%s]\n", (ulong)frame, callpc, value_to_symstr(callpc, dbuf,0)); if (callpc == last_callpc) fprintf(fp, "last callpc == callpc!\n"); } if ((callpc == last_callpc) && STREQ(closest_symbol(callpc), "smp_stop_cpu_interrupt")) goto skip_frame; if (INSTACK(frame, bt) && ((ulong)frame < lastframe)) if (mach_CRASHDEBUG(2)) fprintf(fp, "--> frame pointer reversion?\n"); if (INKERNEL((int) callpc) && !INKERNEL((int) frame)) { sym = db_search_symbol(callpc, DB_STGY_ANY, &offset); db_symbol_values(sym, &name, NULL); if (is_kernel_data(callpc)) { if (mach_CRASHDEBUG(2)) fprintf(fp, "--> break(2): callpc %lx is data?\n", callpc); if (!is_rodata_text(callpc)) break; } if (mach_CRASHDEBUG(2)) fprintf(fp, "--> (3) lastframe: %lx => frame: %lx\n", lastframe, (ulong)frame); db_print_stack_entry(name, 0, 0, 0, callpc, bt, ep, frame_number++, frame); if (BT_REFERENCE_FOUND(bt)) return; if ((ulong)frame < lastframe) { if (STREQ(closest_symbol(callpc), "reschedule")) x86_user_eframe(bt); break; } if (INSTACK(frame, bt) && ((ulong)frame > lastframe)) lastframe = (ulong)frame; if (mach_CRASHDEBUG(2)) fprintf(fp, "--> break: INKERNEL(callpc: %lx [%s]) && !INKERNEL(frame: %lx)\n", callpc, value_to_symstr(callpc, dbuf, 0), 
(ulong)frame); break; } if (!INKERNEL((int) frame)) { if (mach_CRASHDEBUG(2)) fprintf(fp, "--> break: !INKERNEL(frame: %lx)\n", (ulong)frame); break; } } if (mach_CRASHDEBUG(2)) { fprintf(fp, "--> returning lastframe: %lx\n", lastframe); } if (ep->eframe_found) frame_number = dump_eframe(ep, frame_number, bt); #ifndef MCLX return(lastframe); #endif } /* * The remainder of this file was generated at MCL to segregate * x86-specific needs. */ static int x86_uvtop(struct task_context *, ulong, physaddr_t *, int); static int x86_kvtop(struct task_context *, ulong, physaddr_t *, int); static int x86_uvtop_PAE(struct task_context *, ulong, physaddr_t *, int); static int x86_kvtop_PAE(struct task_context *, ulong, physaddr_t *, int); static int x86_uvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); static int x86_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int); static int x86_uvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); static int x86_kvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int); static int x86_kvtop_remap(ulong, physaddr_t *); static ulong x86_get_task_pgd(ulong); static ulong x86_processor_speed(void); static ulong x86_get_pc(struct bt_info *); static ulong x86_get_sp(struct bt_info *); static void x86_get_stack_frame(struct bt_info *, ulong *, ulong *); static int x86_translate_pte(ulong, void *, ulonglong); static uint64_t x86_memory_size(void); static ulong x86_vmalloc_start(void); static ulong *read_idt_table(int); static void eframe_init(void); static int remap_init(void); #define READ_IDT_INIT 1 #define READ_IDT_RUNTIME 2 static char *extract_idt_function(ulong *, char *, ulong *); static int x86_is_task_addr(ulong); static int x86_verify_symbol(const char *, ulong, char); static int x86_eframe_search(struct bt_info *); static ulong x86_in_irqstack(ulong); static int x86_dis_filter(ulong, char *, unsigned int); static struct line_number_hook x86_line_number_hooks[]; static int 
x86_is_uvaddr(ulong, struct task_context *); static void x86_init_kernel_pgd(void); static ulong xen_m2p_nonPAE(ulong); static int x86_xendump_p2m_create(struct xendump_data *); static int x86_pvops_xendump_p2m_create(struct xendump_data *); static int x86_pvops_xendump_p2m_l2_create(struct xendump_data *); static int x86_pvops_xendump_p2m_l3_create(struct xendump_data *); static void x86_debug_dump_page(FILE *, char *, char *); static int x86_xen_kdump_p2m_create(struct xen_kdump_data *); static char *x86_xen_kdump_load_page(ulong, char *); static char *x86_xen_kdump_load_page_PAE(ulong, char *); static ulong x86_xen_kdump_page_mfn(ulong); static ulong x86_xen_kdump_page_mfn_PAE(ulong); static ulong x86_xendump_panic_task(struct xendump_data *); static void x86_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); static char *x86_xendump_load_page(ulong, char *); static char *x86_xendump_load_page_PAE(ulong, char *); static int x86_xendump_page_index(ulong); static int x86_xendump_page_index_PAE(ulong); static void x86_init_hyper(int); static ulong x86_get_stackbase_hyper(ulong); static ulong x86_get_stacktop_hyper(ulong); int INT_EFRAME_SS = 14; int INT_EFRAME_ESP = 13; int INT_EFRAME_EFLAGS = 12; /* CS lcall7 */ int INT_EFRAME_CS = 11; /* EIP lcall7 */ int INT_EFRAME_EIP = 10; /* EFLAGS lcall7 */ int INT_EFRAME_ERR = 9; int INT_EFRAME_ES = 8; int INT_EFRAME_DS = 7; int INT_EFRAME_EAX = 6; int INT_EFRAME_EBP = 5; int INT_EFRAME_EDI = 4; int INT_EFRAME_ESI = 3; int INT_EFRAME_EDX = 2; int INT_EFRAME_ECX = 1; int INT_EFRAME_EBX = 0; int INT_EFRAME_GS = -1; #define MAX_USER_EFRAME_SIZE (17) #define KERNEL_EFRAME_SIZE (INT_EFRAME_EFLAGS+1) #define EFRAME_USER (1) #define EFRAME_KERNEL (2) #define DPL_BITS (0x3) static int dump_eframe(struct eframe *ep, int frame_number, struct bt_info *bt) { int i; char buf[BUFSIZE], *sp; ulong int_eframe[MAX_USER_EFRAME_SIZE]; int eframe_type, args; ulong value, *argp; eframe_type = 0; if 
(STACK_OFFSET_TYPE(ep->eframe_addr) > STACKSIZE()) return(frame_number); GET_STACK_DATA(ep->eframe_addr, (char *)int_eframe, SIZE(pt_regs)); if (int_eframe[INT_EFRAME_CS] & DPL_BITS) { if (!INSTACK(ep->eframe_addr + SIZE(pt_regs) - 1, bt)) return(frame_number); /* error(FATAL, "read of exception frame would go beyond stack\n"); */ eframe_type = EFRAME_USER; } else { if (!INSTACK(ep->eframe_addr + (KERNEL_EFRAME_SIZE*sizeof(ulong)) - 1, bt)) return(frame_number); /* error(FATAL, "read of exception frame would go beyond stack\n"); */ eframe_type = EFRAME_KERNEL; } x86_dump_eframe_common(bt, int_eframe, (eframe_type == EFRAME_KERNEL)); if (bt->flags & BT_EFRAME_SEARCH) return 0; if (eframe_type == EFRAME_USER) return(frame_number); if (BT_REFERENCE_CHECK(bt)) return(++frame_number); /* * The exception occurred while executing in kernel mode. * Pull out the EIP from the exception frame and display * the frame line. Then figure out whether it's possible to * show any arguments. */ fprintf(fp, "%s#%d [%08lx] %s at %08lx\n", frame_number < 10 ? " " : "", frame_number, int_eframe[INT_EFRAME_EBP], value_to_symstr(int_eframe[INT_EFRAME_EIP], buf, 0), int_eframe[INT_EFRAME_EIP]); frame_number++; if ((sp = closest_symbol(int_eframe[INT_EFRAME_EIP])) == NULL) return(frame_number); value = symbol_value(sp); argp = (ulong *)(int_eframe[INT_EFRAME_EBP] + (sizeof(long)*2)); args = is_system_call(NULL, value) ? 4 : eframe_numargs(int_eframe[INT_EFRAME_EIP], bt); fprintf(fp, " ("); for (i = 0; i < args; i++, argp++) { if (INSTACK(argp, bt)) value = GET_STACK_ULONG((ulong)argp); else /* impossible! 
 */
		/* arg beyond the cached stack buffer: read it from the dump */
			readmem((ulong)argp, KVADDR, &value, sizeof(ulong),
				"syscall arg", FAULT_ON_ERROR);

		if (i)
			fprintf(fp, ", ");

		/* prefer a symbol name, then a symbolic string, then raw hex */
		if ((sp = value_symbol(value)))
			fprintf(fp, "%s", sp);
		else if ((bt->flags & BT_SYMBOLIC_ARGS) &&
		    strlen(value_to_symstr(value, buf, 0)))
			fprintf(fp, "%s", buf);
		else
			fprintf(fp, "%lx", value);
	}
	fprintf(fp, ")\n");

	if (bt->flags & BT_LINE_NUMBERS)
		x86_dump_line_number(int_eframe[INT_EFRAME_EIP]);

	return(frame_number);
}

/*
 * Dump an exception frame, coming from either source of stack trace code.
 * (i.e., -fomit-frame-pointer or not)
 *
 * With BT_REFERENCE_CHECK set on bt, nothing is printed: the frame's
 * register values are compared against bt->ref->hexval and BT_REF_FOUND
 * is set in bt->ref->cmdflags on a match.  Otherwise the register set is
 * printed to fp; "kernel" selects the kernel-mode layout (no SS/ESP,
 * EBP shown on the first line) versus the user-mode layout.
 */
void
x86_dump_eframe_common(struct bt_info *bt, ulong *int_eframe, int kernel)
{
	struct syment *sp;
	ulong offset;

	if (bt && BT_REFERENCE_CHECK(bt)) {
		/* only hex-value references can match register contents */
		if (!(bt->ref->cmdflags & BT_REF_HEXVAL))
			return;
		/* segment registers are 16-bit, hence the (short) compares */
		if ((int_eframe[INT_EFRAME_EAX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EBX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ECX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EDX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EBP] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ESI] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EDI] == bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_ES] == (short)bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_DS] == (short)bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_CS] == (short)bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EIP] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ERR] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EFLAGS] == bt->ref->hexval))
			bt->ref->cmdflags |= BT_REF_FOUND;
		/* SS/ESP are only pushed on a user-mode entry */
		if (!kernel) {
			if ((int_eframe[INT_EFRAME_ESP] == bt->ref->hexval) ||
			    ((short)int_eframe[INT_EFRAME_SS] ==
			    (short)bt->ref->hexval))
				bt->ref->cmdflags |= BT_REF_FOUND;
		}
		return;
	}

	if (kernel) {
		/* "bt -E" output also identifies the exception EIP */
		if (bt && (bt->flags & BT_EFRAME_SEARCH)) {
			fprintf(fp, " [exception EIP: ");
			if ((sp = value_search(int_eframe[INT_EFRAME_EIP],
			    &offset))) {
				fprintf(fp, "%s", sp->name);
				if (offset)
					fprintf(fp,
					    (*gdb_output_radix == 16) ?
					    "+0x%lx" : "+%ld", offset);
			} else
				fprintf(fp, "unknown or invalid address");
			fprintf(fp, "]\n");
		}
		fprintf(fp,
		    " EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx EBP: %08lx \n",
			int_eframe[INT_EFRAME_EAX],
			int_eframe[INT_EFRAME_EBX],
			int_eframe[INT_EFRAME_ECX],
			int_eframe[INT_EFRAME_EDX],
			int_eframe[INT_EFRAME_EBP]);
	} else
		fprintf(fp,
		    " EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx \n",
			int_eframe[INT_EFRAME_EAX],
			int_eframe[INT_EFRAME_EBX],
			int_eframe[INT_EFRAME_ECX],
			int_eframe[INT_EFRAME_EDX]);

	fprintf(fp, " DS: %04x ESI: %08lx ES: %04x EDI: %08lx",
		(short)int_eframe[INT_EFRAME_DS],
		int_eframe[INT_EFRAME_ESI],
		(short)int_eframe[INT_EFRAME_ES],
		int_eframe[INT_EFRAME_EDI]);
	/* INT_EFRAME_GS is -1 on pre-2.6.20 kernels with no pt_regs.xgs */
	if (kernel && (INT_EFRAME_GS != -1))
		fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]);
	fprintf(fp, "\n");

	if (!kernel) {
		fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx",
			(short)int_eframe[INT_EFRAME_SS],
			int_eframe[INT_EFRAME_ESP],
			int_eframe[INT_EFRAME_EBP]);
		if (INT_EFRAME_GS != -1)
			fprintf(fp, " GS: %04x",
				(short)int_eframe[INT_EFRAME_GS]);
		fprintf(fp, "\n");
	}

	fprintf(fp, " CS: %04x EIP: %08lx ERR: %08lx EFLAGS: %08lx \n",
		(short)int_eframe[INT_EFRAME_CS],
		int_eframe[INT_EFRAME_EIP],
		int_eframe[INT_EFRAME_ERR],
		int_eframe[INT_EFRAME_EFLAGS]);
}

/*
 * Catch a few functions that show up as rodata but really are
 * functions.
 */
int
is_rodata_text(ulong callpc)
{
	struct syment *sp;

	if (!is_rodata(callpc, &sp))
		return FALSE;

	/* only the interrupt/call trampolines qualify as "really text" */
	if (strstr(sp->name, "interrupt") || strstr(sp->name, "call_"))
		return TRUE;

	return FALSE;
}

/*
 * Classify a symbol name as a known exception-frame entry point:
 * a direct IDT entry, a ret_from_intr-style return path, the common
 * "error_code" jump target, or "signal_return".  Returns FALSE when the
 * name matches none of them.  (The bt argument is currently unused.)
 */
static int
check_for_eframe(char *name, struct bt_info *bt)
{
	int i;
	ulong *ip;
	char buf[BUFSIZE];

	/* 256 IDT gates, two ulongs each */
	ip = read_idt_table(READ_IDT_RUNTIME);
	for (i = 0; i < 256; i++, ip += 2) {
		if (STREQ(name, extract_idt_function(ip, buf, NULL)))
			return IDT_DIRECT_ENTRY;
	}

	if (STREQ(name, "ret_from_intr") ||
	    STREQ(name, "call_call_function_interrupt") ||
	    STREQ(name, "call_reschedule_interrupt") ||
	    STREQ(name, "call_invalidate_interrupt"))
		return RET_FROM_INTR;

	if (STREQ(name, "error_code"))
		return IDT_JMP_ERROR_CODE;

	if (STREQ(name, "signal_return"))
		return SIGNAL_RETURN;

	return FALSE;
}

/*
 * Return the syment of the function that did the "jmp error_code".
 * Relies on the "do_<trap>" naming convention: strips the "do_" prefix
 * from the handler found at callpc and looks up the remainder.
 */
struct syment *
x86_jmp_error_code(ulong callpc)
{
	struct syment *sp;

	if (!(sp = value_search(callpc, NULL)) || !STRNEQ(sp->name, "do_"))
		return NULL;

	return (symbol_search(sp->name + strlen("do_")));
}

static const char *hook_files[] = {
	"arch/i386/kernel/entry.S",
	"arch/i386/kernel/head.S",
	"arch/i386/kernel/semaphore.c"
};

#define ENTRY_S      ((char **)&hook_files[0])
#define HEAD_S       ((char **)&hook_files[1])
#define SEMAPHORE_C  ((char **)&hook_files[2])

/*
 * Map assembly entry-point symbols to their source files, for kernels
 * whose debuginfo cannot resolve line numbers for them (installed into
 * machdep->line_number_hooks for kernels < 2.6.24).
 */
static struct line_number_hook x86_line_number_hooks[] = {
	{"lcall7", ENTRY_S},
	{"lcall27", ENTRY_S},
	{"ret_from_fork", ENTRY_S},
	{"system_call", ENTRY_S},
	{"ret_from_sys_call", ENTRY_S},
	{"ret_from_intr", ENTRY_S},
	{"divide_error", ENTRY_S},
	{"coprocessor_error", ENTRY_S},
	{"simd_coprocessor_error", ENTRY_S},
	{"device_not_available", ENTRY_S},
	{"debug", ENTRY_S},
	{"nmi", ENTRY_S},
	{"int3", ENTRY_S},
	{"overflow", ENTRY_S},
	{"bounds", ENTRY_S},
	{"invalid_op", ENTRY_S},
	{"coprocessor_segment_overrun", ENTRY_S},
	{"double_fault", ENTRY_S},
	{"invalid_TSS", ENTRY_S},
	{"segment_not_present", ENTRY_S},
	{"stack_segment", ENTRY_S},
	{"general_protection", ENTRY_S},
	{"alignment_check", ENTRY_S},
	{"page_fault", ENTRY_S},
	{"machine_check", ENTRY_S},
	{"spurious_interrupt_bug", ENTRY_S},
	{"v86_signal_return", ENTRY_S},
	{"tracesys", ENTRY_S},
	{"tracesys_exit", ENTRY_S},
	{"badsys", ENTRY_S},
	{"ret_from_exception", ENTRY_S},
	{"reschedule", ENTRY_S},
	{"error_code", ENTRY_S},
	{"device_not_available_emulate", ENTRY_S},
	{"restore_all", ENTRY_S},
	{"signal_return", ENTRY_S},
	{"L6", HEAD_S},
	{"_text", HEAD_S},
	{"startup_32", HEAD_S},
	{"checkCPUtype", HEAD_S},
	{"is486", HEAD_S},
	{"is386", HEAD_S},
	{"ready", HEAD_S},
	{"check_x87", HEAD_S},
	{"setup_idt", HEAD_S},
	{"rp_sidt", HEAD_S},
	{"stack_start", HEAD_S},
	{"int_msg", HEAD_S},
	{"ignore_int", HEAD_S},
	{"idt_descr", HEAD_S},
	{"idt", HEAD_S},
	{"gdt_descr", HEAD_S},
	{"gdt", HEAD_S},
	{"swapper_pg_dir", HEAD_S},
	{"pg0", HEAD_S},
	{"pg1", HEAD_S},
	{"empty_zero_page", HEAD_S},
	{"__down_failed", SEMAPHORE_C},
	{"__down_failed_interruptible", SEMAPHORE_C},
	{"__down_failed_trylock", SEMAPHORE_C},
	{"__up_wakeup", SEMAPHORE_C},
	{"__write_lock_failed", SEMAPHORE_C},
	{"__read_lock_failed", SEMAPHORE_C},
	{NULL, NULL}    /* list must be NULL-terminated */
};

/*
 * Print the file-and-line for callpc.  If the exact address has no line
 * info, retry once from the closest preceding symbol value, trimming the
 * ": " suffix from the retried result.
 */
static void
x86_dump_line_number(ulong callpc)
{
	int retries;
	char buf[BUFSIZE], *p;

	retries = 0;
try_closest:
	get_line_number(callpc, buf, FALSE);

	if (strlen(buf)) {
		if (retries) {
			p = strstr(buf, ": ");
			if (p)
				*p = NULLCHAR;
		}
		fprintf(fp, " %s\n", buf);
	} else {
		if (retries) {
			fprintf(fp, GDB_PATCHED() ?
			    "" : " (cannot determine file and line number)\n");
		} else {
			retries++;
			callpc = closest_symbol_value(callpc);
			goto try_closest;
		}
	}
}

/*
 * Look for likely exception frames in a stack.
 */
struct x86_pt_regs {
	ulong reg_value[MAX_USER_EFRAME_SIZE];
};

/*
 * Searches from addr within the stackframe defined by bt
 * for the next set of bytes that matches an exception frame pattern.
 * Returns either the address of the frame or 0.
 */
static ulong
x86_next_eframe(ulong addr, struct bt_info *bt)
{
	ulong *first, *last;
	struct x86_pt_regs *pt;
	ulong *stack;
	ulong rv;

	stack = (ulong *)bt->stackbuf;

	if (!INSTACK(addr, bt)) {
		return(0);
	}

	rv = 0;
	first = stack + ((addr - bt->stackbase) / sizeof(ulong));
	/* last possible start of a full pt_regs within the stack */
	last = stack +
	    (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) / sizeof(ulong));

	for ( ; first <= last; first++) {
		pt = (struct x86_pt_regs *)first;

		/* check for kernel exception frame */
		/* classic selectors: CS 0x10, DS/ES 0x18 */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x10) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x18) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x18) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			/* with frame pointers, EBP must also be on-stack */
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/* later selector layout: CS 0x60, DS/ES 0x68 */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x60) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x68) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x68) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/* CS 0x60 with user DS/ES 0x7b */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x60) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/* Xen paravirtualized kernel: ring-1 CS 0x61 */
		if (XEN() &&
		    ((short)pt->reg_value[INT_EFRAME_CS] == 0x61) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/* check for user exception frame */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x2b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x2b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x2b) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/*
		 * 2.6 kernels using sysenter_entry instead of system_call
		 * have a funky trampoline EIP address.
		 */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) &&
		    (pt->reg_value[INT_EFRAME_EFLAGS] == 0x246) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}
	}
	return(rv);
}

/*
 * Scan a task stack (or, with BT_EFRAME_SEARCH2, every per-cpu hard/soft
 * IRQ stack) for exception frame patterns and dump each one found.
 * Returns the number of frames found.
 */
static int
x86_eframe_search(struct bt_info *bt_in)
{
	ulong addr;
	struct x86_pt_regs *pt;
	struct eframe eframe, *ep;
	struct bt_info bt_local, *bt;
	ulong flagsave;
	ulong irqstack;
	short cs;
	char *mode, *ibuf;
	int c, cnt;

	bt = bt_in;
	ibuf = NULL;
	cnt = 0;

	if (bt->flags & BT_EFRAME_SEARCH2) {
		if (!(tt->flags & IRQSTACKS)) {
			error(FATAL,
			    "this kernel does not have IRQ stacks\n");
			return 0;
		}

		/* work on a local copy, recursing once per IRQ stack */
		BCOPY(bt_in, &bt_local, sizeof(struct bt_info));
		bt = &bt_local;
		bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2;

		for (c = 0; c < NR_CPUS; c++) {
			if (tt->hardirq_ctx[c]) {
				if ((bt->flags & BT_CPUMASK) &&
				    !(NUM_IN_BITMAP(bt->cpumask, c)))
					continue;
				bt->hp->esp = tt->hardirq_ctx[c];
				fprintf(fp, "CPU %d HARD IRQ STACK:\n", c);
				if ((cnt = x86_eframe_search(bt)))
					fprintf(fp, "\n");
				else
					fprintf(fp, "(none found)\n\n");
			}
		}

		for (c = 0; c < NR_CPUS; c++)
{ if (tt->softirq_ctx[c]) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; bt->hp->esp = tt->softirq_ctx[c]; fprintf(fp, "CPU %d SOFT IRQ STACK:\n", c); if ((cnt = x86_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } } return 0; } if (bt->hp && bt->hp->esp) { BCOPY(bt_in, &bt_local, sizeof(struct bt_info)); bt = &bt_local; addr = bt->hp->esp; if ((irqstack = x86_in_irqstack(addr))) { bt->stackbase = irqstack; bt->stacktop = irqstack + SIZE(irq_ctx); if (SIZE(irq_ctx) > STACKSIZE()) { ibuf = (char *)GETBUF(SIZE(irq_ctx)); bt->stackbuf = ibuf; } alter_stackbuf(bt); } else if (!INSTACK(addr, bt)) error(FATAL, "unrecognized stack address for this task: %lx\n", bt->hp->esp); } else if (tt->flags & THREAD_INFO) addr = bt->stackbase + roundup(SIZE(thread_info), sizeof(ulong)); else addr = bt->stackbase + roundup(SIZE(task_struct), sizeof(ulong)); ep = &eframe; BZERO(ep, sizeof(struct eframe)); while ((addr = x86_next_eframe(addr, bt)) != 0) { cnt++; if (bt->flags & BT_EFRAME_COUNT) { addr += 4; continue; } pt = (struct x86_pt_regs *) (bt->stackbuf + (addr - bt->stackbase)); ep->eframe_addr = addr; cs = pt->reg_value[INT_EFRAME_CS]; if ((cs == 0x23) || (cs == 0x73)) { mode = "USER-MODE"; } else if ((cs == 0x10) || (cs == 0x60)) { mode = "KERNEL-MODE"; } else if (XEN() && (cs == 0x61)) { mode = "KERNEL-MODE"; } else { mode = "UNKNOWN-MODE"; } fprintf(fp, "%s %s EXCEPTION FRAME AT %lx:\n", bt->flags & BT_EFRAME_SEARCH ? 
"\n" : "", mode, ep->eframe_addr); flagsave = bt->flags; bt->flags |= BT_EFRAME_SEARCH; dump_eframe(ep, 0, bt); bt->flags = flagsave; addr += 4; } if (ibuf) FREEBUF(ibuf); return cnt; } static ulong x86_in_irqstack(ulong addr) { int c; if (!(tt->flags & IRQSTACKS)) return 0; for (c = 0; c < NR_CPUS; c++) { if (tt->hardirq_ctx[c]) { if ((addr >= tt->hardirq_ctx[c]) && (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx)))) return(tt->hardirq_ctx[c]); } if (tt->softirq_ctx[c]) { if ((addr >= tt->softirq_ctx[c]) && (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx)))) return(tt->softirq_ctx[c]); } } return 0; } /* * Dump the kernel-entry user-mode exception frame. */ static void x86_user_eframe(struct bt_info *bt) { struct eframe eframe, *ep; struct x86_pt_regs x86_pt_regs, *pt; ulong pt_regs_addr; pt_regs_addr = USER_EFRAME_ADDR(bt->task); readmem(pt_regs_addr, KVADDR, &x86_pt_regs, sizeof(struct x86_pt_regs), "x86 pt_regs", FAULT_ON_ERROR); pt = &x86_pt_regs; if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) && ((short)pt->reg_value[INT_EFRAME_DS] == 0x2b) && ((short)pt->reg_value[INT_EFRAME_ES] == 0x2b) && ((short)pt->reg_value[INT_EFRAME_SS] == 0x2b) && IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) && IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc) && IS_UVADDR(pt->reg_value[INT_EFRAME_EBP], bt->tc)) { ep = &eframe; BZERO(ep, sizeof(struct eframe)); ep->eframe_addr = pt_regs_addr; bt->flags |= BT_EFRAME_SEARCH; dump_eframe(ep, 0, bt); bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH; } } /* * Do all necessary machine-specific setup here. This is called three times, * during symbol table initialization, and before and after GDB has been * initialized. 
*/ struct machine_specific x86_machine_specific = { 0 }; static int PGDIR_SHIFT; static int PTRS_PER_PTE; static int PTRS_PER_PGD; void x86_init(int when) { struct syment *sp, *spn; if (XEN_HYPER_MODE()) { x86_init_hyper(when); return; } switch (when) { case SETUP_ENV: machdep->process_elf_notes = x86_process_elf_notes; break; case PRE_SYMTAB: machdep->verify_symbol = x86_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->machspec = &x86_machine_specific; machdep->verify_paddr = generic_verify_paddr; break; case PRE_GDB: if (symbol_exists("pae_pgd_cachep") || ((sp = symbol_search("pkmap_count")) && (spn = next_symbol(NULL, sp)) && (((spn->value - sp->value)/sizeof(int)) == 512))) { machdep->flags |= PAE; PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; machdep->uvtop = x86_uvtop_PAE; machdep->kvtop = x86_kvtop_PAE; } else { PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL; machdep->uvtop = x86_uvtop; machdep->kvtop = x86_kvtop; free(machdep->pmd); machdep->pmd = machdep->pgd; } machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->kvbase = symbol_value("_stext") & ~KVBASE_MASK; if (machdep->kvbase & 0x80000000) machdep->is_uvaddr = generic_is_uvaddr; else { vt->flags |= COMMON_VADDR; machdep->is_uvaddr = x86_is_uvaddr; } 
machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->eframe_search = x86_eframe_search; machdep->back_trace = x86_back_trace_cmd; machdep->processor_speed = x86_processor_speed; machdep->get_task_pgd = x86_get_task_pgd; machdep->dump_irq = generic_dump_irq; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; machdep->get_stack_frame = x86_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = x86_translate_pte; machdep->memory_size = x86_memory_size; machdep->vmalloc_start = x86_vmalloc_start; machdep->is_task_addr = x86_is_task_addr; machdep->dis_filter = x86_dis_filter; machdep->cmd_mach = x86_cmd_mach; machdep->get_smp_cpus = x86_get_smp_cpus; machdep->flags |= FRAMESIZE_DEBUG; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = x86_init_kernel_pgd; machdep->xendump_p2m_create = x86_xendump_p2m_create; machdep->xen_kdump_p2m_create = x86_xen_kdump_p2m_create; machdep->xendump_panic_task = x86_xendump_panic_task; machdep->get_xendump_regs = x86_get_xendump_regs; machdep->clear_machdep_cache = x86_clear_machdep_cache; break; case POST_GDB: if (x86_omit_frame_pointer()) machdep->flags |= OMIT_FRAME_PTR; STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct"); if (MEMBER_EXISTS("user_regs_struct", "ebp")) MEMBER_OFFSET_INIT(user_regs_struct_ebp, "user_regs_struct", "ebp"); else MEMBER_OFFSET_INIT(user_regs_struct_ebp, "user_regs_struct", "bp"); if (MEMBER_EXISTS("user_regs_struct", "esp")) MEMBER_OFFSET_INIT(user_regs_struct_esp, "user_regs_struct", "esp"); else MEMBER_OFFSET_INIT(user_regs_struct_esp, "user_regs_struct", "sp"); if (MEMBER_EXISTS("user_regs_struct", "eip")) MEMBER_OFFSET_INIT(user_regs_struct_eip, "user_regs_struct", "eip"); else MEMBER_OFFSET_INIT(user_regs_struct_eip, "user_regs_struct", "ip"); if (MEMBER_EXISTS("user_regs_struct", "eax")) 
MEMBER_OFFSET_INIT(user_regs_struct_eax, "user_regs_struct", "eax"); else MEMBER_OFFSET_INIT(user_regs_struct_eax, "user_regs_struct", "ax"); if (MEMBER_EXISTS("user_regs_struct", "ebx")) MEMBER_OFFSET_INIT(user_regs_struct_ebx, "user_regs_struct", "ebx"); else MEMBER_OFFSET_INIT(user_regs_struct_ebx, "user_regs_struct", "bx"); if (MEMBER_EXISTS("user_regs_struct", "ecx")) MEMBER_OFFSET_INIT(user_regs_struct_ecx, "user_regs_struct", "ecx"); else MEMBER_OFFSET_INIT(user_regs_struct_ecx, "user_regs_struct", "cx"); if (MEMBER_EXISTS("user_regs_struct", "edx")) MEMBER_OFFSET_INIT(user_regs_struct_edx, "user_regs_struct", "edx"); else MEMBER_OFFSET_INIT(user_regs_struct_edx, "user_regs_struct", "dx"); if (MEMBER_EXISTS("user_regs_struct", "esi")) MEMBER_OFFSET_INIT(user_regs_struct_esi, "user_regs_struct", "esi"); else MEMBER_OFFSET_INIT(user_regs_struct_esi, "user_regs_struct", "si"); if (MEMBER_EXISTS("user_regs_struct", "edi")) MEMBER_OFFSET_INIT(user_regs_struct_edi, "user_regs_struct", "edi"); else MEMBER_OFFSET_INIT(user_regs_struct_edi, "user_regs_struct", "di"); if (MEMBER_EXISTS("user_regs_struct", "eflags")) MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "eflags"); else MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "flags"); MEMBER_OFFSET_INIT(user_regs_struct_cs, "user_regs_struct", "cs"); MEMBER_OFFSET_INIT(user_regs_struct_ds, "user_regs_struct", "ds"); MEMBER_OFFSET_INIT(user_regs_struct_es, "user_regs_struct", "es"); MEMBER_OFFSET_INIT(user_regs_struct_fs, "user_regs_struct", "fs"); MEMBER_OFFSET_INIT(user_regs_struct_gs, "user_regs_struct", "gs"); MEMBER_OFFSET_INIT(user_regs_struct_ss, "user_regs_struct", "ss"); if (!VALID_STRUCT(user_regs_struct)) { /* Use this hardwired version -- sometimes the * debuginfo doesn't pick this up even though * it exists in the kernel; it shouldn't change. 
*/ struct x86_user_regs_struct { long ebx, ecx, edx, esi, edi, ebp, eax; unsigned short ds, __ds, es, __es; unsigned short fs, __fs, gs, __gs; long orig_eax, eip; unsigned short cs, __cs; long eflags, esp; unsigned short ss, __ss; }; ASSIGN_SIZE(user_regs_struct) = sizeof(struct x86_user_regs_struct); ASSIGN_OFFSET(user_regs_struct_ebp) = offsetof(struct x86_user_regs_struct, ebp); ASSIGN_OFFSET(user_regs_struct_esp) = offsetof(struct x86_user_regs_struct, esp); ASSIGN_OFFSET(user_regs_struct_eip) = offsetof(struct x86_user_regs_struct, eip); ASSIGN_OFFSET(user_regs_struct_eax) = offsetof(struct x86_user_regs_struct, eax); ASSIGN_OFFSET(user_regs_struct_ebx) = offsetof(struct x86_user_regs_struct, ebx); ASSIGN_OFFSET(user_regs_struct_ecx) = offsetof(struct x86_user_regs_struct, ecx); ASSIGN_OFFSET(user_regs_struct_edx) = offsetof(struct x86_user_regs_struct, edx); ASSIGN_OFFSET(user_regs_struct_esi) = offsetof(struct x86_user_regs_struct, esi); ASSIGN_OFFSET(user_regs_struct_edi) = offsetof(struct x86_user_regs_struct, edi); ASSIGN_OFFSET(user_regs_struct_eflags) = offsetof(struct x86_user_regs_struct, eflags); ASSIGN_OFFSET(user_regs_struct_cs) = offsetof(struct x86_user_regs_struct, cs); ASSIGN_OFFSET(user_regs_struct_ds) = offsetof(struct x86_user_regs_struct, ds); ASSIGN_OFFSET(user_regs_struct_es) = offsetof(struct x86_user_regs_struct, es); ASSIGN_OFFSET(user_regs_struct_fs) = offsetof(struct x86_user_regs_struct, fs); ASSIGN_OFFSET(user_regs_struct_gs) = offsetof(struct x86_user_regs_struct, gs); ASSIGN_OFFSET(user_regs_struct_ss) = offsetof(struct x86_user_regs_struct, ss); } MEMBER_OFFSET_INIT(thread_struct_cr3, "thread_struct", "cr3"); STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); STRUCT_SIZE_INIT(e820map, "e820map"); STRUCT_SIZE_INIT(e820entry, "e820entry"); STRUCT_SIZE_INIT(irq_ctx, "irq_ctx"); MEMBER_OFFSET_INIT(e820map_nr_map, "e820map", "nr_map"); MEMBER_OFFSET_INIT(e820entry_addr, "e820entry", "addr"); MEMBER_OFFSET_INIT(e820entry_size, "e820entry", 
"size"); MEMBER_OFFSET_INIT(e820entry_type, "e820entry", "type"); if (KVMDUMP_DUMPFILE()) set_kvm_iohole(NULL); if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else machdep->nr_irqs = 224; /* NR_IRQS */ if (!machdep->hz) { machdep->hz = HZ; if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) machdep->hz = 1000; } if (machdep->flags & PAE) { if (THIS_KERNEL_VERSION < LINUX(2,6,26)) machdep->section_size_bits = _SECTION_SIZE_BITS_PAE_ORIG; else machdep->section_size_bits = _SECTION_SIZE_BITS_PAE_2_6_26; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE; } else { machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; } if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & PAE) machdep->uvtop = x86_uvtop_xen_wpt_PAE; else machdep->uvtop = x86_uvtop_xen_wpt; } if (XEN()) { MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs, "vcpu_guest_context", "user_regs"); MEMBER_OFFSET_INIT(cpu_user_regs_esp, "cpu_user_regs", "esp"); MEMBER_OFFSET_INIT(cpu_user_regs_eip, "cpu_user_regs", "eip"); } if (THIS_KERNEL_VERSION < LINUX(2,6,24)) machdep->line_number_hooks = x86_line_number_hooks; eframe_init(); if (THIS_KERNEL_VERSION >= LINUX(2,6,28)) machdep->machspec->page_protnone = _PAGE_GLOBAL; else machdep->machspec->page_protnone = _PAGE_PSE; STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); STRUCT_SIZE_INIT(percpu_data, "percpu_data"); if (!remap_init()) machdep->machspec->max_numnodes = -1; break; case POST_INIT: read_idt_table(READ_IDT_INIT); break; case LOG_ONLY: machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL & ~KVBASE_MASK; break; } } /* * Account for addition of pt_regs.xgs field in 2.6.20+ kernels. 
*/ static void eframe_init(void) { if (INVALID_SIZE(pt_regs)) { if (THIS_KERNEL_VERSION < LINUX(2,6,20)) ASSIGN_SIZE(pt_regs) = (MAX_USER_EFRAME_SIZE-2)*sizeof(ulong); else { ASSIGN_SIZE(pt_regs) = MAX_USER_EFRAME_SIZE*sizeof(ulong); INT_EFRAME_SS = 15; INT_EFRAME_ESP = 14; INT_EFRAME_EFLAGS = 13; INT_EFRAME_CS = 12; INT_EFRAME_EIP = 11; INT_EFRAME_ERR = 10; INT_EFRAME_GS = 9; } return; } if (MEMBER_EXISTS("pt_regs", "esp")) { INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) INT_EFRAME_GS /= 4; INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4; INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "ebp") / 4; INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; } else { INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "ss") / 4; INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "sp") / 4; INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "flags") / 4; INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "cs") / 4; INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "ip") / 4; INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_ax") / 4; if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "gs")) != -1) INT_EFRAME_GS /= 4; INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "es") / 4; INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "ds") / 4; INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "ax") / 4; INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "bp") / 4; INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", 
"di") / 4; INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "si") / 4; INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "dx") / 4; INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "cx") / 4; INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "bx") / 4; } } /* * Locate regions remapped by the remap allocator */ static int remap_init(void) { ulong start_vaddr, end_vaddr, start_pfn; int max_numnodes; struct machine_specific *ms; struct syment *sp; if (! (sp = symbol_search("node_remap_start_vaddr")) ) return FALSE; start_vaddr = sp->value; if (! (sp = symbol_search("node_remap_end_vaddr")) ) return FALSE; end_vaddr = sp->value; if (! (sp = symbol_search("node_remap_start_pfn")) ) return FALSE; start_pfn = sp->value; max_numnodes = get_array_length("node_remap_start_pfn", NULL, sizeof(ulong)); if (max_numnodes < 1) max_numnodes = 1; ms = machdep->machspec; ms->remap_start_vaddr = calloc(3 * max_numnodes, sizeof(ulong)); if (!ms->remap_start_vaddr) error(FATAL, "cannot malloc remap array"); ms->remap_end_vaddr = ms->remap_start_vaddr + max_numnodes; ms->remap_start_pfn = ms->remap_end_vaddr + max_numnodes; readmem(start_vaddr, KVADDR, ms->remap_start_vaddr, max_numnodes * sizeof(ulong), "node_remap_start_vaddr", FAULT_ON_ERROR); readmem(end_vaddr, KVADDR, ms->remap_end_vaddr, max_numnodes * sizeof(ulong), "node_remap_end_vaddr", FAULT_ON_ERROR); readmem(start_pfn, KVADDR, ms->remap_start_pfn, max_numnodes * sizeof(ulong), "node_remap_end_vaddr", FAULT_ON_ERROR); ms->max_numnodes = max_numnodes; return TRUE; } static int x86_kvtop_remap(ulong kvaddr, physaddr_t *paddr) { struct machine_specific *ms; int i; ms = machdep->machspec; /* ms->max_numnodes is -1 when unused. */ for (i = 0; i < ms->max_numnodes; ++i) { if (kvaddr >= ms->remap_start_vaddr[i] && kvaddr < ms->remap_end_vaddr[i]) { *paddr = PTOB(ms->remap_start_pfn[i]) + kvaddr - ms->remap_start_vaddr[i]; return TRUE; } } return FALSE; } /* * Needs to be done this way because of potential 4G/4G split. 
 */
static int
x86_is_uvaddr(ulong vaddr, struct task_context *tc)
{
	/* per-task VMA check instead of a fixed split boundary */
	return IN_TASK_VMA(tc->task, vaddr);
}

/*
 * Translates a user virtual address to its physical address. cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
 *
 * This routine can also take mapped kernel virtual addresses if the -u flag
 * was passed to cmd_vtop(). If so, it makes the translation using the
 * kernel-memory PGD entry instead of swapper_pg_dir.
 */
#define _4MB_PAGE_MASK (~((MEGABYTES(4))-1))
#define _2MB_PAGE_MASK (~((MEGABYTES(2))-1))

static int
x86_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr,
	  int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;
	ulong *page_dir;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pmd_pte;
	ulong pte;
	char buf[BUFSIZE];

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		/* kernel thread: pgd comes from cr3 or the borrowed active_mm */
		if (VALID_MEMBER(thread_struct_cr3))
			pgd = (ulong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no cr3 or active_mm?\n");
			readmem(tc->task + OFFSET(task_struct_active_mm),
				KVADDR, &active_mm, sizeof(void *),
				"task active_mm contents", FAULT_ON_ERROR);
			if (!active_mm)
				error(FATAL,
				    "no active_mm for this kernel thread\n");
			readmem(active_mm + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long),
				"mm_struct pgd", FAULT_ON_ERROR);
		}
	} else {
		/* user task: pgd from the cached mm_struct when available */
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct +
				OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	page_dir = pgd + (vaddr >> PGDIR_SHIFT);

	FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_dir)), pgd_pte);

	if (!(pgd_pte & (_PAGE_PRESENT | _PAGE_PROTNONE)))
		goto no_upage;

	/* 4MB large page: translation ends at the pgd level */
	if (pgd_pte & _PAGE_4M) {
		if (verbose) {
			fprintf(fp, " PAGE: %s (4MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pgd_pte))));
			x86_translate_pte(pgd_pte, 0, 0);
		}
		*paddr = NONPAE_PAGEBASE(pgd_pte) +
			(vaddr & ~_4MB_PAGE_MASK);
		return TRUE;
	}

	/* 2-level paging: the pmd is folded into the pgd */
	page_middle = page_dir;

	FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_middle)), pmd_pte);

	if (!pmd_pte)
		goto no_upage;

#ifdef PTES_IN_LOWMEM
	page_table = (ulong *)(PTOV(NONPAE_PAGEBASE(pmd_pte)) +
		((vaddr>>10) & ((PTRS_PER_PTE-1)<<2)));

	FILL_PTBL(NONPAE_PAGEBASE(page_table), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
#else
	/* page tables may be in highmem: read via the physical address */
	page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
		((vaddr>>10) & ((PTRS_PER_PTE-1)<<2)));

	FILL_PTBL(NONPAE_PAGEBASE(page_table), PHYSADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
#endif

	if (verbose)
		fprintf(fp, " PTE: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_table)), pte);

	if (!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		/* not present: expose the raw (possibly swap) pte value */
		*paddr = pte;
		if (pte && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(pte, 0, 0);
		}
		goto no_upage;
	}

	*paddr = NONPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(pte))));
		x86_translate_pte(pte, 0, 0);
	}

	return TRUE;

no_upage:
	return FALSE;
}

/*
 * Non-PAE user-address translation for Xen kernels with writable page
 * tables: page-table entries hold machine addresses, which are converted
 * to pseudo-physical addresses via xen_m2p_nonPAE() at each level.
 */
static int
x86_uvtop_xen_wpt(struct task_context *tc, ulong vaddr, physaddr_t *paddr,
		  int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;
	ulong *page_dir;
	ulong *page_middle;
	ulong *machine_page_table, *pseudo_page_table;
	ulong pgd_pte, pseudo_pgd_pte;
	ulong pmd_pte;
	ulong machine_pte, pseudo_pte;
	char buf[BUFSIZE];

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		if (VALID_MEMBER(thread_struct_cr3))
			pgd = (ulong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no cr3 or active_mm?\n");
			readmem(tc->task + OFFSET(task_struct_active_mm),
				KVADDR, &active_mm, sizeof(void *),
				"task active_mm contents", FAULT_ON_ERROR);
			if (!active_mm)
				error(FATAL,
				    "no active_mm for this kernel thread\n");
			readmem(active_mm + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long),
				"mm_struct pgd", FAULT_ON_ERROR);
		}
	} else {
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct +
				OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	page_dir = pgd + (vaddr >> PGDIR_SHIFT);

	FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_dir)), pgd_pte);

	if (!pgd_pte)
		goto no_upage;

	if (pgd_pte & _PAGE_4M) {
		if (verbose)
			fprintf(fp, " PAGE: %s (4MB) [machine]\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pgd_pte))));

		/* machine -> pseudo-physical frame for the 4MB page */
		pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte));

		if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) {
			if (verbose)
				fprintf(fp, " PAGE: page not available\n");
			*paddr = PADDR_NOT_AVAILABLE;
			return FALSE;
		}

		pseudo_pgd_pte |= PAGEOFFSET(pgd_pte);

		if (verbose) {
			fprintf(fp, " PAGE: %s (4MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte))));
			x86_translate_pte(pseudo_pgd_pte, 0, 0);
		}

		*paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) +
			(vaddr & ~_4MB_PAGE_MASK);
		return TRUE;
	}

	page_middle = page_dir;

	FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_middle)), pmd_pte);

	if (!pmd_pte)
		goto no_upage;

	machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
		((vaddr>>10) & ((PTRS_PER_PTE-1)<<2)));

	/* translate the page table's own machine frame before reading it */
	pseudo_page_table = (ulong *)
		xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table));

	FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE());
	machine_pte = ULONG(machdep->ptbl +
		PAGEOFFSET(machine_page_table));

	if (verbose) {
		fprintf(fp, " PTE: %s [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)machine_page_table)));
		fprintf(fp, " PTE: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)pseudo_page_table +
			PAGEOFFSET(machine_page_table))), machine_pte);
	}

	if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		*paddr = machine_pte;
		if (machine_pte && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(machine_pte, 0, 0);
		}
		goto no_upage;
	}

	pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte));
	pseudo_pte |= PAGEOFFSET(machine_pte);

	*paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %s [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(machine_pte))));
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(pseudo_pte))));
		x86_translate_pte(pseudo_pte, 0, 0);
	}

	return TRUE;

no_upage:
	return FALSE;
}

/*
 * PAE (3-level) user-address translation; 64-bit entries at each level.
 */
static int
x86_uvtop_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr,
	      int verbose)
{
	ulong mm, active_mm;
	ulonglong *pgd;
	ulonglong page_dir_entry;
	ulonglong page_middle;
	ulonglong page_middle_entry;
	ulonglong page_table;
	ulonglong page_table_entry;
	ulonglong physpage;
	ulonglong ull;
	ulong offset;
	char buf[BUFSIZE];

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		if (VALID_MEMBER(thread_struct_cr3))
			pgd = (ulonglong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no cr3 or active_mm?\n");
			readmem(tc->task
+ OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd))); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_upage; } page_middle = PAE_PAGEBASE(page_dir_entry); FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { goto no_upage; } if (page_middle_entry & _PAGE_PSE) { if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (vaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); 
if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = page_table_entry; if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_upage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); *paddr = physpage; if (verbose) { ull = PAE_PAGEBASE(page_table_entry); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_table_entry); } return TRUE; no_upage: return FALSE; } static int x86_uvtop_xen_wpt_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulonglong *pgd; ulonglong page_dir_entry; ulonglong page_middle, pseudo_page_middle; ulonglong page_middle_entry; ulonglong page_table, pseudo_page_table; ulonglong page_table_entry, pte; ulonglong physpage, pseudo_physpage; ulonglong ull; ulong offset; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_cr3)) pgd = (ulonglong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no cr3 or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd))); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, 
PTRS_PER_PGD * sizeof(ulonglong)); offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_upage; } page_middle = PAE_PAGEBASE(page_dir_entry); pseudo_page_middle = xen_m2p(page_middle); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), pseudo_page_middle | PAGEOFFSET(page_dir_entry) | (page_dir_entry & _PAGE_NX)); FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & _PAGE_PRESENT)) { goto no_upage; } if (page_middle_entry & _PAGE_PSE) { error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (vaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); pseudo_page_table = xen_m2p(page_table); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_page_table | PAGEOFFSET(page_middle_entry) | (page_middle_entry & _PAGE_NX)); } FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); if (verbose) { ull = 
page_table + offset; fprintf(fp, " PTE: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = page_table_entry; if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_upage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); pseudo_physpage = xen_m2p(physpage); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX)); } *paddr = pseudo_physpage + PAGEOFFSET(vaddr); if (verbose) { physpage = PAE_PAGEBASE(physpage); fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&physpage))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&pseudo_physpage))); pte = pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX); x86_translate_pte(0, 0, pte); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
*/

static int
x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;
	ulong *page_dir;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pmd_pte;
	ulong pte;
	char buf[BUFSIZE];

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (XEN_HYPER_MODE()) {
		/* Hypervisor direct-map addresses translate by subtraction. */
		if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
			*paddr = kvaddr - DIRECTMAP_VIRT_START;
			return TRUE;
		}
		pgd = (ulong *)symbol_value("idle_pg_table_l2");
	} else {
		/*
		 * Fast paths: remapped regions and unity-mapped (non-vmalloc)
		 * addresses; the full walk is still done when verbose output
		 * was requested.
		 */
		if (x86_kvtop_remap(kvaddr, paddr)) {
			if (!verbose)
				return TRUE;
		} else if (!vt->vmalloc_start) {
			*paddr = VTOP(kvaddr);
			return TRUE;
		} else if (!IS_VMALLOC_ADDR(kvaddr)) {
			*paddr = VTOP(kvaddr);
			if (!verbose)
				return TRUE;
		}

		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
			return (x86_kvtop_xen_wpt(tc, kvaddr, paddr, verbose));

		pgd = (ulong *)vt->kernel_pgd[0];
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	page_dir = pgd + (kvaddr >> PGDIR_SHIFT);

	FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_dir)), pgd_pte);

	if (!pgd_pte)
		goto no_kpage;

	/* 4MB large page: the PGD entry maps the page directly. */
	if (pgd_pte & _PAGE_4M) {
		if (verbose) {
			fprintf(fp, " PAGE: %s (4MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pgd_pte))));
			x86_translate_pte(pgd_pte, 0, 0);
		}

		*paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK);
		return TRUE;
	}

	/* Two-level (non-PAE) layout: the pmd is folded into the pgd. */
	page_middle = page_dir;

	FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_middle)), pmd_pte);

	if (!pmd_pte)
		goto no_kpage;

#ifdef PTES_IN_LOWMEM
	page_table = (ulong *)(PTOV(NONPAE_PAGEBASE(pmd_pte)) +
		((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2)));

	FILL_PTBL(NONPAE_PAGEBASE(page_table), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
#else
	page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
		((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2)));

	FILL_PTBL(NONPAE_PAGEBASE(page_table), PHYSADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));
#endif

	if (verbose)
		fprintf(fp, " PTE: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_table)), pte);

	if (!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		if (pte && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(pte, 0, 0);
		}
		goto no_kpage;
	}

	if (verbose) {
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(pte))));
		x86_translate_pte(pte, 0, 0);
	}

	*paddr = NONPAE_PAGEBASE(pte) + PAGEOFFSET(kvaddr);

	return TRUE;

no_kpage:
	return FALSE;
}

/*
 * Non-PAE kernel translation for Xen kernels with writable page tables:
 * every table entry is a machine frame that must be mapped back to a
 * pseudo-physical frame via xen_m2p_nonPAE() before the walk continues.
 */
static int
x86_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;
	ulong *page_dir;
	ulong *page_middle;
	ulong *machine_page_table, *pseudo_page_table;
	ulong pgd_pte, pseudo_pgd_pte;
	ulong pmd_pte;
	ulong machine_pte, pseudo_pte;
	char buf[BUFSIZE];

	pgd = (ulong *)vt->kernel_pgd[0];

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	page_dir = pgd + (kvaddr >> PGDIR_SHIFT);

	FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_dir)), pgd_pte);

	if (!pgd_pte)
		goto no_kpage;

	if (pgd_pte & _PAGE_4M) {
		if (verbose)
			fprintf(fp, " PAGE: %s (4MB) [machine]\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pgd_pte))));

		pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte));

		if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) {
			if (verbose)
				fprintf(fp, " PAGE: page not available\n");
			*paddr = PADDR_NOT_AVAILABLE;
			return FALSE;
		}

		pseudo_pgd_pte |= PAGEOFFSET(pgd_pte);

		if (verbose) {
			fprintf(fp, " PAGE: %s (4MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte))));
			x86_translate_pte(pseudo_pgd_pte, 0, 0);
		}

		*paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) +
			(kvaddr & ~_4MB_PAGE_MASK);

		return TRUE;
	}

	page_middle = page_dir;

	FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)page_middle)), pmd_pte);

	if (!pmd_pte)
		goto no_kpage;

	machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) +
		((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2)));
	pseudo_page_table = (ulong *)
		xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table));

	FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE());
	machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table));

	if (verbose) {
		fprintf(fp, " PTE: %s [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)machine_page_table)));

		fprintf(fp, " PTE: %s => %lx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)pseudo_page_table +
			PAGEOFFSET(machine_page_table))), machine_pte);
	}

	if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		if (machine_pte && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(machine_pte, 0, 0);
		}
		goto no_kpage;
	}

	pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte));
	pseudo_pte |= PAGEOFFSET(machine_pte);

	if (verbose) {
		fprintf(fp, " PAGE: %s [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(machine_pte))));

		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(NONPAE_PAGEBASE(pseudo_pte))));

		x86_translate_pte(pseudo_pte, 0, 0);
	}

	*paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(kvaddr);

	return TRUE;

no_kpage:
	return FALSE;
}

/*
 * PAE kernel virtual-to-physical translation: three-level walk
 * (pgd -> pmd -> pte) with 2MB (_PAGE_PSE) large-page support.
 */
static int
x86_kvtop_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulonglong *pgd;
	ulonglong page_dir_entry;
	ulonglong page_middle;
	ulonglong page_middle_entry;
	ulonglong page_table;
	ulonglong page_table_entry;
	ulonglong physpage;
	ulonglong ull;
	char buf[BUFSIZE];
	ulong offset;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (XEN_HYPER_MODE()) {
		if (DIRECTMAP_VIRT_ADDR(kvaddr)) {
			*paddr = kvaddr - DIRECTMAP_VIRT_START;
			return TRUE;
		}
		if (symbol_exists("idle_pg_table_l3"))
			pgd = (ulonglong *)symbol_value("idle_pg_table_l3");
		else
			pgd = (ulonglong *)symbol_value("idle_pg_table");
	} else {
		if (x86_kvtop_remap(kvaddr, paddr)) {
			if (!verbose)
				return TRUE;
		} else if (!vt->vmalloc_start) {
			*paddr = VTOP(kvaddr);
			return TRUE;
		} else if (!IS_VMALLOC_ADDR(kvaddr)) {
			*paddr = VTOP(kvaddr);
			if (!verbose)
				return TRUE;
		}

		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES))
			return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose));

		pgd = (ulonglong *)vt->kernel_pgd[0];
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong));
	offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong);
	page_dir_entry = *((ulonglong *)&machdep->pgd[offset]);

	if (verbose)
		fprintf(fp, " PGD: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)pgd + offset)), page_dir_entry);

	if (!(page_dir_entry & _PAGE_PRESENT)) {
		goto no_kpage;
	}

	page_middle = PAE_PAGEBASE(page_dir_entry);

	FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE());
	offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
	page_middle_entry = *((ulonglong *)&machdep->pmd[offset]);

	if (verbose) {
		ull = page_middle + offset;
		fprintf(fp, " PMD: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)), page_middle_entry);
	}

	if (!(page_middle_entry & _PAGE_PRESENT)) {
		goto no_kpage;
	}

	if (page_middle_entry & _PAGE_PSE) {
		if (verbose) {
			ull = PAE_PAGEBASE(page_middle_entry);
			fprintf(fp, " PAGE: %s (2MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
				MKSTR(&ull)));
			x86_translate_pte(0, 0, page_middle_entry);
		}

		physpage = PAE_PAGEBASE(page_middle_entry) +
			(kvaddr & ~_2MB_PAGE_MASK);
		*paddr = physpage;

		return TRUE;
	}

	page_table = PAE_PAGEBASE(page_middle_entry);

	FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE());
	offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong);
	page_table_entry = *((ulonglong *)&machdep->ptbl[offset]);

	if (verbose) {
		ull = page_table + offset;
		fprintf(fp, " PTE: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)), page_table_entry);
	}

	if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		if (page_table_entry && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(0, 0, page_table_entry);
		}
		goto no_kpage;
	}

	physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr);
	*paddr = physpage;

	if (verbose) {
		ull = PAE_PAGEBASE(page_table_entry);
		fprintf(fp, " PAGE: %s\n\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)));
		x86_translate_pte(0, 0, page_table_entry);
	}

	return TRUE;

no_kpage:
	return FALSE;
}

/*
 * PAE kernel translation for Xen kernels with writable page tables:
 * each level's machine frame is converted to a pseudo-physical frame
 * via xen_m2p() before being consumed.
 */
static int
x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulonglong *pgd;
	ulonglong page_dir_entry;
	ulonglong page_middle, pseudo_page_middle;
	ulonglong page_middle_entry;
	ulonglong page_table, pseudo_page_table;
	ulonglong page_table_entry, pte;
	ulonglong physpage, pseudo_physpage;
	ulonglong ull;
	ulong offset;
	char buf[BUFSIZE];

	pgd = (ulonglong *)vt->kernel_pgd[0];

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong));
	offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong);
	page_dir_entry = *((ulonglong *)&machdep->pgd[offset]);

	if (verbose)
		fprintf(fp, " PGD: %s => %llx [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)pgd + offset)), page_dir_entry);

	if (!(page_dir_entry & _PAGE_PRESENT)) {
		goto no_kpage;
	}

	page_middle = PAE_PAGEBASE(page_dir_entry);
	pseudo_page_middle = xen_m2p(page_middle);

	if (verbose)
		fprintf(fp, " PGD: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR((ulong)pgd + offset)),
			pseudo_page_middle | PAGEOFFSET(page_dir_entry) |
			(page_dir_entry & _PAGE_NX));

	FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE());
	offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong);
	page_middle_entry = *((ulonglong *)&machdep->pmd[offset]);

	if (verbose) {
		ull = page_middle + offset;
		fprintf(fp, " PMD: %s => %llx [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)), page_middle_entry);
	}

	if (!(page_middle_entry & _PAGE_PRESENT)) {
		goto no_kpage;
	}

	if (page_middle_entry & _PAGE_PSE) {
		/* Large pages in machine frames are not handled here. */
		error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */
		if (verbose) {
			ull = PAE_PAGEBASE(page_middle_entry);
			fprintf(fp, " PAGE: %s (2MB)\n\n",
				mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
				MKSTR(&ull)));
			x86_translate_pte(0, 0, page_middle_entry);
		}

		physpage = PAE_PAGEBASE(page_middle_entry) +
			(kvaddr & ~_2MB_PAGE_MASK);
		*paddr = physpage;

		return TRUE;
	}

	page_table = PAE_PAGEBASE(page_middle_entry);
	pseudo_page_table = xen_m2p(page_table);

	if (verbose) {
		ull = page_middle + offset;
		fprintf(fp, " PMD: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)),
			pseudo_page_table | PAGEOFFSET(page_middle_entry) |
			(page_middle_entry & _PAGE_NX));
	}

	FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE());
	offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong);
	page_table_entry = *((ulonglong *)&machdep->ptbl[offset]);

	if (verbose) {
		ull = page_table + offset;
		fprintf(fp, " PTE: %s => %llx [machine]\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)), page_table_entry);
	}

	if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) {
		if (page_table_entry && verbose) {
			fprintf(fp, "\n");
			x86_translate_pte(0, 0, page_table_entry);
		}
		goto no_kpage;
	}

	physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr);
	pseudo_physpage = xen_m2p(physpage);

	if (verbose) {
		ull = page_table + offset;
		fprintf(fp, " PTE: %s => %llx\n",
			mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX,
			MKSTR(&ull)),
			pseudo_physpage | PAGEOFFSET(page_table_entry) |
			(page_table_entry & _PAGE_NX));
	}

	*paddr = pseudo_physpage + PAGEOFFSET(kvaddr);

	if (verbose) {
		physpage = PAE_PAGEBASE(physpage);
fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&physpage))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&pseudo_physpage))); pte = pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX); x86_translate_pte(0, 0, pte); } return TRUE; no_kpage: return FALSE; } void x86_clear_machdep_cache(void) { machdep->machspec->last_pmd_read_PAE = 0; machdep->machspec->last_ptbl_read_PAE = 0; } /* * Get the relevant page directory pointer from a task structure. */ static ulong x86_get_task_pgd(ulong task) { long offset; ulong cr3; offset = OFFSET_OPTION(task_struct_thread, task_struct_tss); if (INVALID_MEMBER(thread_struct_cr3)) error(FATAL, "cr3 does not exist in this kernel's thread_struct\n"); offset += OFFSET(thread_struct_cr3); readmem(task + offset, KVADDR, &cr3, sizeof(ulong), "task thread cr3", FAULT_ON_ERROR); return(PTOV(cr3)); } /* * Calculate and return the speed of the processor. */ ulong x86_processor_speed(void) { unsigned long cpu_hz, cpu_khz; if (machdep->mhz) return (machdep->mhz); if (symbol_exists("cpu_hz")) { get_symbol_data("cpu_hz", sizeof(long), &cpu_hz); if (cpu_hz) return (machdep->mhz = cpu_hz/1000000); } if (symbol_exists("cpu_khz")) { get_symbol_data("cpu_khz", sizeof(long), &cpu_khz); if (cpu_khz) return(machdep->mhz = cpu_khz/1000); } return 0; } void x86_dump_machdep_table(ulong arg) { int others; ulong xen_wpt; char buf[BUFSIZE]; struct machine_specific *ms; int i, max_numnodes; switch (arg) { default: break; } others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? "|" : ""); if (machdep->flags & OMIT_FRAME_PTR) fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : ""); if (machdep->flags & FRAMESIZE_DEBUG) fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: x86_eframe_search()\n"); fprintf(fp, " back_trace: x86_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: x86_processor_speed()\n"); xen_wpt = XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES); if (machdep->flags & PAE) { fprintf(fp, " uvtop: %s()\n", xen_wpt ? "x86_uvtop_xen_wpt_PAE" : "x86_uvtop_PAE"); fprintf(fp, " kvtop: x86_kvtop_PAE()%s\n", xen_wpt ? " -> x86_kvtop_xen_wpt_PAE()" : ""); } else { fprintf(fp, " uvtop: %s()\n", xen_wpt ? "x86_uvtop_xen_wpt" : "x86_uvtop"); fprintf(fp, " kvtop: x86_kvtop()%s\n", xen_wpt ? 
" -> x86_kvtop_xen_wpt()" : ""); } fprintf(fp, " get_task_pgd: x86_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_stack_frame: x86_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: x86_translate_pte()\n"); fprintf(fp, " memory_size: x86_memory_size()\n"); fprintf(fp, " vmalloc_start: x86_vmalloc_start()\n"); fprintf(fp, " is_task_addr: x86_is_task_addr()\n"); fprintf(fp, " verify_symbol: x86_verify_symbol()\n"); fprintf(fp, " dis_filter: x86_dis_filter()\n"); fprintf(fp, " cmd_mach: x86_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: x86_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: %s\n", COMMON_VADDR_SPACE() ? "x86_is_uvaddr()" : "generic_is_uvaddr()"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: x86_init_kernel_pgd()\n"); fprintf(fp, " value_to_symbol: %s\n", machdep->value_to_symbol == generic_machdep_value_to_symbol ? "generic_machdep_value_to_symbol()" : "x86_is_entry_tramp_address()"); fprintf(fp, " line_number_hooks: %s\n", machdep->line_number_hooks ? 
"x86_line_number_hooks" : "(not used)"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " xendump_p2m_create: x86_xendump_p2m_create()\n"); fprintf(fp, " xendump_p2m_create: %s\n", PVOPS_XEN() ? "x86_pvops_xendump_p2m_create()" : "x86_xendump_p2m_create()"); fprintf(fp, " xendump_panic_task: x86_xendump_panic_task()\n"); fprintf(fp, " get_xendump_regs: x86_get_xendump_regs()\n"); fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n"); fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n"); fprintf(fp, " INT_EFRAME_[reg]:\n"); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "SS: "), INT_EFRAME_SS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ESP: "), INT_EFRAME_ESP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EFLAGS: "), INT_EFRAME_EFLAGS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "CS: "), INT_EFRAME_CS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "IP: "), INT_EFRAME_EIP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ERR: "), INT_EFRAME_ERR); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ES: "), INT_EFRAME_ES); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "DS: "), INT_EFRAME_DS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EAX: "), INT_EFRAME_EAX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EBP: "), INT_EFRAME_EBP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EDI: "), INT_EFRAME_EDI); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ESI: 
"), INT_EFRAME_ESI); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EDX: "), INT_EFRAME_EDX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ECX: "), INT_EFRAME_ECX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EBX: "), INT_EFRAME_EBX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "GS: "), INT_EFRAME_GS); fprintf(fp, " machspec: x86_machine_specific\n"); fprintf(fp, " idt_table: %lx\n", (ulong)machdep->machspec->idt_table); fprintf(fp, " entry_tramp_start: %lx\n", machdep->machspec->entry_tramp_start); fprintf(fp, " entry_tramp_end: %lx\n", machdep->machspec->entry_tramp_end); fprintf(fp, " entry_tramp_start_phys: %llx\n", machdep->machspec->entry_tramp_start_phys); fprintf(fp, " last_pmd_read_PAE: %llx\n", machdep->machspec->last_pmd_read_PAE); fprintf(fp, " last_ptbl_read_PAE: %llx\n", machdep->machspec->last_ptbl_read_PAE); fprintf(fp, " page_protnone: %lx\n", machdep->machspec->page_protnone); ms = machdep->machspec; max_numnodes = ms->max_numnodes; fprintf(fp, " MAX_NUMNODES: "); if (max_numnodes < 0) { fprintf(fp, "(unused)\n"); } else { fprintf(fp, "%d\n", max_numnodes); fprintf(fp, " remap_start_vaddr:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_start_vaddr[i]); } fprintf(fp, "\n"); fprintf(fp, " remap_end_vaddr:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_end_vaddr[i]); } fprintf(fp, "\n"); fprintf(fp, " remap_start_pfn:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_start_pfn[i]); } fprintf(fp, "\n"); } } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void x86_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (pcp) *pcp = x86_get_pc(bt); if (spp) *spp = x86_get_sp(bt); } /* * Get the saved PC from a user-space copy of the kernel stack. 
*/ static ulong x86_get_pc(struct bt_info *bt) { ulong offset; ulong eip; if (tt->flags & THREAD_INFO) { readmem(bt->task + OFFSET(task_struct_thread_eip), KVADDR, &eip, sizeof(void *), "thread_struct eip", FAULT_ON_ERROR); return eip; } offset = OFFSET_OPTION(task_struct_thread_eip, task_struct_tss_eip); return GET_STACK_ULONG(offset); } /* * Get the saved SP from a user-space copy of the kernel stack if it * cannot be found in the panic_ksp array. */ static ulong x86_get_sp(struct bt_info *bt) { ulong offset, ksp; if (get_panic_ksp(bt, &ksp)) return ksp; if (tt->flags & THREAD_INFO) { readmem(bt->task + OFFSET(task_struct_thread_esp), KVADDR, &ksp, sizeof(void *), "thread_struct esp", FAULT_ON_ERROR); return ksp; } offset = OFFSET_OPTION(task_struct_thread_esp, task_struct_tss_esp); return GET_STACK_ULONG(offset); } /* * Translate a PTE, returning TRUE if the page is _PAGE_PRESENT. * If a physaddr pointer is passed in, don't print anything. */ static int x86_translate_pte(ulong pte, void *physaddr, ulonglong pae_pte) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; ulonglong paddr; int nx_bit_set; nx_bit_set = FALSE; if (machdep->flags & PAE) { paddr = PAE_PAGEBASE(pae_pte); sprintf(ptebuf, "%llx", pae_pte); if (pae_pte & _PAGE_NX) nx_bit_set = TRUE; pte = (ulong)pae_pte; } else { paddr = NONPAE_PAGEBASE(pte); sprintf(ptebuf, "%lx", pte); } page_present = (pte & (_PAGE_PRESENT|_PAGE_PROTNONE)); if (physaddr) { if (machdep->flags & PAE) *((ulonglong *)physaddr) = paddr; else *((ulong *)physaddr) = (ulong)paddr; return page_present; } len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(machdep->flags & PAE ? 
pae_pte : pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte & _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (pte & _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (pte & _PAGE_PWT) fprintf(fp, "%sPWT", others++ ? "|" : ""); if (pte & _PAGE_PCD) fprintf(fp, "%sPCD", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if ((pte & _PAGE_PSE) && (pte && _PAGE_PRESENT)) fprintf(fp, "%sPSE", others++ ? "|" : ""); if (pte & _PAGE_GLOBAL) fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); if (pte & _PAGE_PROTNONE && !(pte && _PAGE_PRESENT)) fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); if (nx_bit_set) fprintf(fp, "%sNX", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return page_present; } /* * For the time being, walk through the kernel page directory looking * for the 4MB PTEs. Zones might make this common code in the future. 
*/ static uint64_t x86_memory_size(void) { int i, j; ulong *pp; ulong kpgd[PTRS_PER_PGD]; uint64_t vm_total; uint64_t pgd_total; if (machdep->memsize) return machdep->memsize; if (!(machdep->flags & PAE)) { readmem(vt->kernel_pgd[0], KVADDR, kpgd, sizeof(ulong) * PTRS_PER_PGD, "kernel page directory", FAULT_ON_ERROR); for (i = j = 0, pp = &kpgd[0]; i < PTRS_PER_PGD; i++, pp++) { if ((*pp & (_PAGE_PRESENT|_PAGE_4M)) == (_PAGE_PRESENT|_PAGE_4M) ) { j++; } } pgd_total = (uint64_t)j * (uint64_t)(MEGABYTES(4)); } else pgd_total = 0; /* * Use the memory node data (or its equivalent) if it's larger than * the page directory total. */ vm_total = total_node_memory(); machdep->memsize = MAX(pgd_total, vm_total); return (machdep->memsize); } /* * Determine where vmalloc'd memory starts. */ static ulong x86_vmalloc_start(void) { return (first_vmalloc_address()); } /* * Do the work for cmd_irq() -d option. */ void x86_display_idt_table(void) { int i; ulong *ip; char buf[BUFSIZE]; ip = read_idt_table(READ_IDT_RUNTIME); for (i = 0; i < 256; i++, ip += 2) { if (i < 10) fprintf(fp, " "); else if (i < 100) fprintf(fp, " "); fprintf(fp, "[%d] %s\n", i, extract_idt_function(ip, buf, NULL)); } } /* * Extract the function name out of the IDT entry. */ static char * extract_idt_function(ulong *ip, char *buf, ulong *retaddr) { ulong i1, i2, addr; char locbuf[BUFSIZE]; physaddr_t phys; if (buf) BZERO(buf, BUFSIZE); i1 = *ip; i2 = *(ip+1); i1 &= 0x0000ffff; i2 &= 0xffff0000; addr = i1 | i2; if (retaddr) *retaddr = addr; if (!buf) return NULL; value_to_symstr(addr, locbuf, 0); if (strlen(locbuf)) sprintf(buf, "%s", locbuf); else { sprintf(buf, "%08lx", addr); if (kvtop(NULL, addr, &phys, 0)) { addr = machdep->kvbase + (ulong)phys; if (value_to_symstr(addr, locbuf, 0)) { strcat(buf, " <"); strcat(buf, locbuf); strcat(buf, ">"); } } } return buf; } /* * Read the IDT table into a (hopefully) malloc'd buffer. 
*/

/*
 *  Read the IDT table into a buffer.
 *
 *  READ_IDT_INIT:    one-time read into a malloc'd buffer that is cached
 *                    in machdep->machspec->idt_table; also probes the IDT
 *                    for relocated "entry trampoline" text and, if found,
 *                    installs x86_is_entry_tramp_address as the
 *                    value_to_symbol handler.
 *  READ_IDT_RUNTIME: per-command read into a GETBUF() buffer (caller frees),
 *                    fatal if "idt_table" does not exist.
 *
 *  Returns the buffer, or NULL on READ_IDT_INIT failure.
 */
static ulong *
read_idt_table(int flag)
{
	ulong *idt, addr, offset;
	physaddr_t phys;
	long desc_struct_size;
	struct syment *sp;
	struct machine_specific *ms;

	idt = NULL;
	ms = machdep->machspec;

	/* already cached by a previous READ_IDT_INIT */
	if (ms->idt_table)
		return ms->idt_table;

	desc_struct_size = SIZE(desc_struct) * 256;

	switch (flag)
	{
	case READ_IDT_INIT:
		if (!symbol_exists("idt_table"))
			return NULL;
		if (!(idt = (ulong *)malloc(desc_struct_size))) {
			error(WARNING, "cannot malloc idt_table\n\n");
			return NULL;
		}
		if (!readmem(symbol_value("idt_table"), KVADDR, idt,
		    desc_struct_size, "idt_table", RETURN_ON_ERROR)) {
			error(WARNING, "cannot read idt_table\n\n");
			free(idt);	/* BUGFIX: was leaked on read failure */
			return NULL;
		}
		ms->idt_table = idt;

		/* probe entry 0 to detect an entry-trampoline kernel */
		addr = 0;
		extract_idt_function(idt, NULL, &addr);
		if (addr) {
			if (symbol_exists("__entry_tramp_start") &&
			    symbol_exists("__entry_tramp_end") &&
			    symbol_exists("__start___entry_text")) {
				/* new style: absolute symbols already built in */
				ms->entry_tramp_start =
					symbol_value("__start___entry_text");
				ms->entry_tramp_end = ms->entry_tramp_start +
					(symbol_value("__entry_tramp_end") -
					 symbol_value("__entry_tramp_start"));
				ms->entry_tramp_start_phys = 0;
				machdep->value_to_symbol =
					x86_is_entry_tramp_address;
			} else if (!(sp = value_search(addr, &offset))) {
				/* old style: resolve through the physical page */
				addr = VIRTPAGEBASE(addr);
				if (kvtop(NULL, addr, &phys, 0) &&
				    (sp = value_search(PTOV(phys), &offset)) &&
				    STREQ(sp->name, "entry_tramp_start")) {
					ms->entry_tramp_start = addr;
					ms->entry_tramp_start_phys = phys;
					ms->entry_tramp_end = addr +
					    (symbol_value("entry_tramp_end") -
					     symbol_value("entry_tramp_start"));
					machdep->value_to_symbol =
						x86_is_entry_tramp_address;
				}
			}
		}
		break;

	case READ_IDT_RUNTIME:
		if (!symbol_exists("idt_table"))
			error(FATAL,
			    "idt_table does not exist on this architecture\n");
		idt = (ulong *)GETBUF(desc_struct_size);
		readmem(symbol_value("idt_table"), KVADDR, idt,
			desc_struct_size, "idt_table", FAULT_ON_ERROR);
		break;
	}

	return idt;
}

/*
 *  If the address fits in the entry_tramp_start page, find the syment
 *  associated with it.
*/

/*
 *  Translate a virtual address that lands inside the entry-trampoline
 *  range into the syment of the real kernel symbol; returns NULL when
 *  the address is outside the trampoline range.  On a hit, *retoffset
 *  receives the offset into the symbol.
 */
struct syment *
x86_is_entry_tramp_address(ulong vaddr, ulong *retoffset)
{
	struct syment *sp;
	struct machine_specific *ms;
	ulong addr, offset;

	ms = machdep->machspec;

	if (!ms->entry_tramp_start ||
	    !((vaddr >= ms->entry_tramp_start) &&
	    (vaddr <= ms->entry_tramp_end)))
		return NULL;

	/*
	 *  Check new vs. old style handling of entry_tramp addresses:
	 *
	 *  - The old way requires creation of the real symbol address from
	 *    the entry_tramp address passed in.
	 *  - The new way just uses the absolute (A) symbols that are built
	 *    in using the entry_tramp addresses, w/no phys address required.
	 */
	if (ms->entry_tramp_start_phys)		/* old */
		addr = machdep->kvbase + (ulong)ms->entry_tramp_start_phys +
			PAGEOFFSET(vaddr);
	else					/* new */
		addr = vaddr;

	if ((sp = value_search_base_kernel(addr, &offset))) {
		if (retoffset)
			*retoffset = offset;
		if (CRASHDEBUG(4))
			console("x86_is_entry_tramp_address: %lx: %s %lx+%ld\n",
				vaddr, sp->name, sp->value, offset);
		/* skip the section-marker symbol itself; report the
		 * first real symbol that follows it */
		if (STREQ(sp->name, "entry_tramp_start"))
			sp++;
	}

	return sp;
}

/*
 *  X86 tasks are all stacksize-aligned, except when split from the stack.
 *  With THREAD_INFO kernels any kernel virtual address may be a task.
 */
static int
x86_is_task_addr(ulong task)
{
	if (tt->flags & THREAD_INFO)
		return IS_KVADDR(task);
	else
		return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0));
}

/*
 *  Keep or reject a symbol from the namelist.
*/ static int x86_verify_symbol(const char *name, ulong value, char type) { if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_text") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled.")) return FALSE; return TRUE; } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int x86_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on x86) so this routine both fixes the * references as well as imposing the current output radix on the translations. */ if (CRASHDEBUG(1)) console("IN: %s", inbuf); colon = (inbuf[0] != ' ') ? strstr(inbuf, ":") : NULL; if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } else if (STREQ(argv[argc-2], "call") && hexadecimal(argv[argc-1], 0)) { /* * Update module code of the form: * * call 0xe081e1e0 * * to show a bracketed direct call target. 
*/ p1 = &LASTCHAR(inbuf); if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { sprintf(buf1, " <%s>\n", value_to_symstr(value, buf2, output_radix)); if (IS_MODULE_VADDR(value) && !strstr(buf2, "+")) sprintf(p1, "%s", buf1); } } else if (STREQ(argv[2], "ud2a")) pc->curcmd_flags |= UD2A_INSTRUCTION; else if (STREQ(argv[2], "(bad)")) pc->curcmd_flags |= BAD_INSTRUCTION; if (CRASHDEBUG(1)) console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int x86_get_smp_cpus(void) { int count, cpucount; if ((count = get_cpus_online()) == 0) { count = kt->cpus; if (symbol_exists("cpucount")) { get_symbol_data("cpucount", sizeof(int), &cpucount); cpucount++; count = MAX(cpucount, kt->cpus); } } if (XEN() && (count == 1) && symbol_exists("cpu_present_map")) { ulong cpu_present_map; get_symbol_data("cpu_present_map", sizeof(ulong), &cpu_present_map); cpucount = count_bits_long(cpu_present_map); count = MAX(cpucount, kt->cpus); } if (KVMDUMP_DUMPFILE() && (count < get_cpus_present())) return(get_highest_cpu_present()+1); return MAX(count, get_highest_cpu_online()+1); } /* * Machine dependent command. */ void x86_cmd_mach(void) { int c, cflag, mflag; unsigned int radix; cflag = mflag = radix = 0; while ((c = getopt(argcnt, args, "cmxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'm': mflag++; x86_display_memmap(); break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) x86_display_cpu_data(radix); if (!cflag && !mflag) x86_display_machine_stats(); } /* * "mach" command output. 
*/ static void x86_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); if (!STREQ(kt->hypervisor, "(undetermined)") && !STREQ(kt->hypervisor, "bare hardware")) fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (tt->flags & IRQSTACKS) { fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } } static void x86_display_cpu_data(unsigned int radix) { int cpu; ulong cpu_data = 0; if (symbol_exists("cpu_data")) cpu_data = symbol_value("cpu_data"); else if (symbol_exists("boot_cpu_data")) cpu_data = symbol_value("boot_cpu_data"); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "%sCPU %d:\n", cpu ? 
"\n" : "", cpu); dump_struct("cpuinfo_x86", cpu_data, radix); cpu_data += SIZE(cpuinfo_x86); } } static char *e820type[] = { "(invalid type)", "E820_RAM", "E820_RESERVED", "E820_ACPI", "E820_NVS", "E820_UNUSABLE", }; static void x86_display_memmap(void) { ulong e820; int nr_map, i; char *buf, *e820entry_ptr; ulonglong addr, size; ulong type; e820 = symbol_value("e820"); buf = (char *)GETBUF(SIZE(e820map)); readmem(e820, KVADDR, &buf[0], SIZE(e820map), "e820map", FAULT_ON_ERROR); nr_map = INT(buf + OFFSET(e820map_nr_map)); fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); for (i = 0; i < nr_map; i++) { e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); type = ULONG(e820entry_ptr + OFFSET(e820entry_type)); fprintf(fp, "%016llx - %016llx ", addr, addr+size); if (type >= (sizeof(e820type)/sizeof(char *))) fprintf(fp, "type %ld\n", type); else fprintf(fp, "%s\n", e820type[type]); } } /* * Check a few functions to determine whether the kernel was built * with the -fomit-frame-pointer flag. */ #define PUSH_BP_MOV_ESP_BP 0xe58955 #define PUSH_BP_CLR_EAX_MOV_ESP_BP 0xe589c03155ULL static int x86_omit_frame_pointer(void) { ulonglong push_bp_mov_esp_bp; int i; char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read"}; if (pc->flags & KERNEL_DEBUG_QUERY) return FALSE; for (i = 0; i < 2; i++) { if (!readmem(symbol_value(checkfuncs[i]), KVADDR, &push_bp_mov_esp_bp, sizeof(ulonglong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) return TRUE; if (!(((push_bp_mov_esp_bp & 0x0000ffffffULL) == PUSH_BP_MOV_ESP_BP) || ((push_bp_mov_esp_bp & 0xffffffffffULL) == PUSH_BP_CLR_EAX_MOV_ESP_BP))) return TRUE; } return FALSE; } /* * Disassemble an address and determine whether the instruction calls * a function; if so, return a pointer to the name of the called function. 
*/ char * x86_function_called_by(ulong eip) { struct syment *sp; char buf[BUFSIZE], *p1, *p2, *funcname; ulong value, offset; unsigned char byte; funcname = NULL; if (!readmem(eip, KVADDR, &byte, sizeof(unsigned char), "call byte", RETURN_ON_ERROR)) return funcname; if (byte != 0xe8) return funcname; sprintf(buf, "x/i 0x%lx", eip); open_tmpfile2(); if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { if ((p1 = strstr(buf, "call "))) { p1 += strlen("call "); if ((p2 = strstr(p1, " <"))) { p2 += strlen(" <"); if ((p1 = strstr(p2, ">"))) *p1 = NULLCHAR; if ((sp = symbol_search(p2))) funcname = sp->name; } else if ((p2 = strstr(p1, "0x"))) { if (!extract_hex(strip_linefeeds(p2), &value, NULLCHAR, TRUE)) continue; if ((sp = value_search(value, &offset)) && !offset) funcname = sp->name; } } } } close_tmpfile2(); return funcname; } struct syment * x86_text_lock_jmp(ulong eip, ulong *offset) { int i, c; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS]; struct syment *sp; ulong value; sprintf(buf1, "x/10i 0x%lx", eip); buf2[0] = NULLCHAR; value = 0; open_tmpfile2(); if (gdb_pass_through(buf1, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { rewind(pc->tmpfile2); while (fgets(buf1, BUFSIZE, pc->tmpfile2)) { if (!(c = parse_line(buf1, arglist))) continue; for (i = 0; i < c; i++) { if (STREQ(arglist[i], "jmp") && ((i+1)kernel_pgd[i] = value; } static ulong xen_m2p_nonPAE(ulong machine) { ulonglong pseudo; pseudo = xen_m2p((ulonglong)machine); if (pseudo == XEN_MACHADDR_NOT_FOUND) return XEN_MFN_NOT_FOUND; return ((ulong)pseudo); } #include "netdump.h" #include "xen_dom0.h" /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. 
*/
#define MAX_X86_FRAMES  (16)
#define MFNS_PER_FRAME  (PAGESIZE()/sizeof(ulong))

/*
 *  From the xen vmcore, build xkd->p2m_mfn_frame_list: the list of mfns
 *  backing the dom0 kernel's phys_to_machine_mapping[max_pfn] array.
 *  Two entry paths: via the p2m_mfn page (default) or, when KDUMP_CR3
 *  is set, by walking page tables from the saved cr3.
 *  Returns TRUE on success, FALSE on page-walk failure.
 */
static int
x86_xen_kdump_p2m_create(struct xen_kdump_data *xkd)
{
	int i, j;
	ulong kvaddr;
	ulong *up;
	ulonglong *ulp;
	ulong frames;
	ulong frame_mfn[MAX_X86_FRAMES] = { 0 };
	int mfns[MAX_X86_FRAMES] = { 0 };

	/*
	 *  Temporarily read physical (machine) addresses from vmcore.
	 */
	pc->curcmd_flags |= XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n");

	if (xkd->flags & KDUMP_CR3)
		goto use_cr3;

	xkd->p2m_frames = 0;

	if (CRASHDEBUG(1))
		fprintf(fp, "x86_xen_kdump_p2m_create: p2m_mfn: %lx\n",
			xkd->p2m_mfn);

	if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(),
	    "xen kdump p2m mfn page", RETURN_ON_ERROR))
		error(FATAL, "cannot read xen kdump p2m mfn page\n");

	if (CRASHDEBUG(1)) {
		up = (ulong *)xkd->page;
		for (i = 0; i < 4; i++) {
			fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n",
				(ulong)((i * 4) * sizeof(ulong)),
				*up, *(up+1), *(up+2), *(up+3));
			up += 4;
		}
		fprintf(fp, "\n");
	}

	/* the p2m mfn page holds up to MAX_X86_FRAMES frame-list mfns */
	for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_FRAMES; i++, up++)
		frame_mfn[i] = *up;

	/* count the non-zero mfn entries held by each frame-list page */
	for (i = 0; i < MAX_X86_FRAMES; i++) {
		if (!frame_mfn[i])
			break;

		if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page,
		    PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR))
			error(FATAL, "cannot read xen kdump p2m mfn list page\n");

		for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++)
			if (*up)
				mfns[i]++;

		xkd->p2m_frames += mfns[i];

		if (CRASHDEBUG(7)) {
			up = (ulong *)xkd->page;
			for (j = 0; j < 256; j++) {
				fprintf(fp,
				    "%08lx: %08lx %08lx %08lx %08lx\n",
					(ulong)((j * 4) * sizeof(ulong)),
					*up, *(up+1), *(up+2), *(up+3));
				up += 4;
			}
		}
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames);

	if ((xkd->p2m_mfn_frame_list = (ulong *)
	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	/*
	 *  NOTE(review): destination offset is i * MFNS_PER_FRAME while the
	 *  list is sized by the sum of mfns[i]; this is only safe when every
	 *  frame except the last is completely full -- apparently the
	 *  expected layout, but verify for sparse p2m frame pages.
	 */
	for (i = 0, frames = xkd->p2m_frames; frames; i++) {
		if (!readmem(PTOB(frame_mfn[i]), PHYSADDR,
		    &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME],
		    mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page",
		    RETURN_ON_ERROR))
			error(FATAL, "cannot read xen kdump p2m mfn list page\n");
		frames -= mfns[i];
	}

	if (CRASHDEBUG(2)) {
		for (i = 0; i < xkd->p2m_frames; i++)
			fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
		fprintf(fp, "\n");
	}

	pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (restore): p2m translation\n");

	return TRUE;

use_cr3:
	/* alternate path: walk the guest page tables from the saved cr3 */
	if (CRASHDEBUG(1))
		fprintf(fp, "x86_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3);

	if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(),
	    "xen kdump cr3 page", RETURN_ON_ERROR))
		error(FATAL, "cannot read xen kdump cr3 page\n");

	if (CRASHDEBUG(7)) {
		fprintf(fp, "contents of page directory page:\n");

		if (machdep->flags & PAE) {
			ulp = (ulonglong *)machdep->pgd;
			fprintf(fp, "%016llx %016llx %016llx %016llx\n",
				*ulp, *(ulp+1), *(ulp+2), *(ulp+3));
		} else {
			up = (ulong *)machdep->pgd;
			for (i = 0; i < 256; i++) {
				fprintf(fp,
				    "%08lx: %08lx %08lx %08lx %08lx\n",
					(ulong)((i * 4) * sizeof(ulong)),
					*up, *(up+1), *(up+2), *(up+3));
				up += 4;
			}
		}
	}

	kvaddr = symbol_value("max_pfn");
	if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
		return FALSE;
	up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));

	/* frames needed = ceil(max_pfn / pfns-per-page) */
	xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
		((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	if (CRASHDEBUG(1))
		fprintf(fp, "max_pfn at %lx: %lx (%ld) -> %d p2m_frames\n",
			kvaddr, *up, *up, xkd->p2m_frames);

	if ((xkd->p2m_mfn_frame_list = (ulong *)
	    malloc(xkd->p2m_frames * sizeof(ulong))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	kvaddr = symbol_value("phys_to_machine_mapping");
	if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
		return FALSE;
	up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr));
	kvaddr = *up;
	if (CRASHDEBUG(1))
		fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr);

	if (CRASHDEBUG(7)) {
		fprintf(fp, "contents of first phys_to_machine_mapping page:\n");
		if (!x86_xen_kdump_load_page(kvaddr, xkd->page))
			error(INFO,
			    "cannot read first phys_to_machine_mapping page\n");

		up = (ulong *)xkd->page;
		for (i = 0; i < 256; i++) {
			fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n",
				(ulong)((i * 4) * sizeof(ulong)),
				*up, *(up+1), *(up+2), *(up+3));
			up += 4;
		}
	}

	/* invalidate the page-walk caches before translating */
	machdep->last_ptbl_read = BADADDR;
	machdep->last_pmd_read = BADADDR;
	machdep->last_pgd_read = BADADDR;

	for (i = 0; i < xkd->p2m_frames; i++) {
		xkd->p2m_mfn_frame_list[i] = x86_xen_kdump_page_mfn(kvaddr);
		kvaddr += PAGESIZE();
	}

	if (CRASHDEBUG(1)) {
		for (i = 0; i < xkd->p2m_frames; i++)
			fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]);
		fprintf(fp, "\n");
	}

	machdep->last_ptbl_read = 0;
	machdep->last_pmd_read = 0;
	machdep->last_pgd_read = 0;

	pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
	if (CRASHDEBUG(1))
		fprintf(fp, "readmem (restore): p2m translation\n");

	return TRUE;
}

/*
 *  Find the page associate with the kvaddr, and read its contents
 *  into the passed-in buffer.
*/

/*
 *  Find the page associated with the kvaddr by walking the (non-PAE,
 *  two-level) page tables already loaded at machdep->pgd, and read its
 *  contents into the passed-in buffer.  Returns pgbuf, or NULL on a
 *  failed walk.
 */
static char *
x86_xen_kdump_load_page(ulong kvaddr, char *pgbuf)
{
	ulong *entry;
	ulong *up;
	ulong mfn;

	if (machdep->flags & PAE)
		return x86_xen_kdump_load_page_PAE(kvaddr, pgbuf);

	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	up = (ulong *)pgbuf;
	/* CONSISTENCY FIX: was a hard-coded 12; use PAGESHIFT() like the
	 * PAE variant below (identical value on x86) */
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen page table page", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find page table page\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  PAE (three-level) version of the walk above: pgd -> pmd -> pte.
 */
static char *
x86_xen_kdump_load_page_PAE(ulong kvaddr, char *pgbuf)
{
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pmd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pmd entry from pgd\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump page table page", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find page table page from pmd\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  Return the mfn value associated with a virtual address.
*/ static ulong x86_xen_kdump_page_mfn(ulong kvaddr) { ulong *entry; ulong *up; ulong mfn; if (machdep->flags & PAE) return x86_xen_kdump_page_mfn_PAE(kvaddr); up = (ulong *)machdep->pgd; entry = up + (kvaddr >> PGDIR_SHIFT); mfn = (*entry) >> PAGESHIFT(); if ((mfn != machdep->last_ptbl_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), "xen kdump pgd entry", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", mfn); machdep->last_ptbl_read = mfn; up = (ulong *)machdep->ptbl; entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); mfn = (*entry) >> PAGESHIFT(); return mfn; } static ulong x86_xen_kdump_page_mfn_PAE(ulong kvaddr) { ulonglong *entry; ulonglong *up; ulong mfn; up = (ulonglong *)machdep->pgd; entry = up + (kvaddr >> PGDIR_SHIFT); mfn = (ulong)((*entry) >> PAGESHIFT()); if ((mfn != machdep->last_pmd_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), "xen kdump pgd entry", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pgd entry from cr3 page (mfn: %lx)\n", mfn); machdep->last_pmd_read = mfn; up = (ulonglong *)machdep->pmd; entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); mfn = (ulong)((*entry) >> PAGESHIFT()); if ((mfn != machdep->last_ptbl_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), "xen kdump pmd entry", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pmd entry from pgd (mfn: %lx)\n", mfn); machdep->last_ptbl_read = mfn; up = (ulonglong *)machdep->ptbl; entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); mfn = (ulong)((*entry) >> PAGESHIFT()); return mfn; } #include "xendump.h" /* * Create an index of mfns for each page that makes up the * kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int x86_xendump_p2m_create(struct xendump_data *xd) { int i, idx; ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ulong *up; ulonglong *ulp; off_t offset; /* * Check for pvops Xen kernel before presuming it's HVM. 
*/ if (symbol_exists("pv_init_ops") && symbol_exists("xen_patch") && (xd->xc_core.header.xch_magic == XC_CORE_MAGIC)) return x86_pvops_xendump_p2m_create(xd); if (!symbol_exists("phys_to_machine_mapping")) { xd->flags |= XC_CORE_NO_P2M; return TRUE; } if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == INVALID_OFFSET) error(FATAL, "cannot determine vcpu_guest_context.ctrlreg offset\n"); else if (CRASHDEBUG(1)) fprintf(xd->ofp, "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ctrlreg_offset); offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_ctxt_offset\n"); if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg)) error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); mfn = (ctrlreg[3] >> PAGESHIFT()) | (ctrlreg[3] << (BITS()-PAGESHIFT())); for (i = 0; CRASHDEBUG(1) && (i < 8); i++) { fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]); if (i == 3) fprintf(xd->ofp, " -> mfn: %lx", mfn); fprintf(xd->ofp, "\n"); } if (!xc_core_mfn_to_page(mfn, machdep->pgd)) error(FATAL, "cannot read/find cr3 page\n"); machdep->last_pgd_read = mfn; if (CRASHDEBUG(1)) { fprintf(xd->ofp, "contents of page directory page:\n"); if (machdep->flags & PAE) { ulp = (ulonglong *)machdep->pgd; fprintf(xd->ofp, "%016llx %016llx %016llx %016llx\n", *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); } else { up = (ulong *)machdep->pgd; for (i = 0; i < 256; i++) { fprintf(xd->ofp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } } kvaddr = symbol_value("max_pfn"); if (!x86_xendump_load_page(kvaddr, xd->page)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (CRASHDEBUG(1)) fprintf(xd->ofp, "max_pfn: %lx\n", *up); xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); if ((xd->xc_core.p2m_frame_index_list = (ulong *) malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); kvaddr = symbol_value("phys_to_machine_mapping"); if (!x86_xendump_load_page(kvaddr, xd->page)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (CRASHDEBUG(1)) fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); kvaddr = *up; machdep->last_ptbl_read = BADADDR; machdep->last_pmd_read = BADADDR; for (i = 0; i < xd->xc_core.p2m_frames; i++) { if ((idx = x86_xendump_page_index(kvaddr)) == MFN_NOT_FOUND) return FALSE; xd->xc_core.p2m_frame_index_list[i] = idx; kvaddr += PAGESIZE(); } machdep->last_ptbl_read = 0; machdep->last_pmd_read = 0; return TRUE; } static int x86_pvops_xendump_p2m_create(struct xendump_data *xd) { int i; ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ulong *up; ulonglong *ulp; off_t offset; if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == INVALID_OFFSET) error(FATAL, "cannot determine vcpu_guest_context.ctrlreg offset\n"); else if (CRASHDEBUG(1)) fprintf(xd->ofp, "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ctrlreg_offset); offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_ctxt_offset\n"); if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg)) error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); mfn = (ctrlreg[3] >> PAGESHIFT()) | (ctrlreg[3] << (BITS()-PAGESHIFT())); for (i = 0; CRASHDEBUG(1) && (i < 8); i++) { fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]); if (i == 3) fprintf(xd->ofp, " -> mfn: %lx", mfn); fprintf(xd->ofp, "\n"); } if (!xc_core_mfn_to_page(mfn, machdep->pgd)) error(FATAL, "cannot read/find cr3 page\n"); machdep->last_pgd_read = mfn; if (CRASHDEBUG(1)) { fprintf(xd->ofp, "contents of page directory page:\n"); if (machdep->flags & PAE) { ulp = (ulonglong *)machdep->pgd; fprintf(xd->ofp, "%016llx %016llx 
%016llx %016llx\n", *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); } else { up = (ulong *)machdep->pgd; for (i = 0; i < 256; i++) { fprintf(xd->ofp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } } kvaddr = symbol_value("max_pfn"); if (!x86_xendump_load_page(kvaddr, xd->page)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (CRASHDEBUG(1)) fprintf(xd->ofp, "max_pfn: %lx\n", *up); xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); if ((xd->xc_core.p2m_frame_index_list = (ulong *) malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); if (symbol_exists("p2m_mid_missing")) return x86_pvops_xendump_p2m_l3_create(xd); else return x86_pvops_xendump_p2m_l2_create(xd); } static int x86_pvops_xendump_p2m_l2_create(struct xendump_data *xd) { int i, idx, p; ulong kvaddr, *up; machdep->last_ptbl_read = BADADDR; machdep->last_pmd_read = BADADDR; kvaddr = symbol_value("p2m_top"); for (p = 0; p < xd->xc_core.p2m_frames; p += XEN_PFNS_PER_PAGE) { if (!x86_xendump_load_page(kvaddr, xd->page)) return FALSE; if (CRASHDEBUG(7)) x86_debug_dump_page(xd->ofp, xd->page, "contents of page:"); up = (ulong *)(xd->page); for (i = 0; i < XEN_PFNS_PER_PAGE; i++, up++) { if ((p+i) >= xd->xc_core.p2m_frames) break; if ((idx = x86_xendump_page_index(*up)) == MFN_NOT_FOUND) return FALSE; xd->xc_core.p2m_frame_index_list[p+i] = idx; } kvaddr += PAGESIZE(); } machdep->last_ptbl_read = 0; machdep->last_pmd_read = 0; return TRUE; } static int x86_pvops_xendump_p2m_l3_create(struct xendump_data *xd) { int i, idx, j, p2m_frame, ret = FALSE; ulong kvaddr, *p2m_mid, p2m_mid_missing, p2m_missing, *p2m_top; p2m_top = NULL; machdep->last_ptbl_read = BADADDR; machdep->last_pmd_read = BADADDR; kvaddr = symbol_value("p2m_missing"); if (!x86_xendump_load_page(kvaddr, xd->page)) goto err; p2m_missing = *(ulong *)(xd->page + 
PAGEOFFSET(kvaddr)); kvaddr = symbol_value("p2m_mid_missing"); if (!x86_xendump_load_page(kvaddr, xd->page)) goto err; p2m_mid_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr)); kvaddr = symbol_value("p2m_top"); if (!x86_xendump_load_page(kvaddr, xd->page)) goto err; kvaddr = *(ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (!x86_xendump_load_page(kvaddr, xd->page)) goto err; if (CRASHDEBUG(7)) x86_debug_dump_page(xd->ofp, xd->page, "contents of p2m_top page:"); p2m_top = (ulong *)GETBUF(PAGESIZE()); memcpy(p2m_top, xd->page, PAGESIZE()); for (i = 0; i < XEN_P2M_TOP_PER_PAGE; ++i) { p2m_frame = i * XEN_P2M_MID_PER_PAGE; if (p2m_frame >= xd->xc_core.p2m_frames) break; if (p2m_top[i] == p2m_mid_missing) continue; if (!x86_xendump_load_page(p2m_top[i], xd->page)) goto err; if (CRASHDEBUG(7)) x86_debug_dump_page(xd->ofp, xd->page, "contents of p2m_mid page:"); p2m_mid = (ulong *)xd->page; for (j = 0; j < XEN_P2M_MID_PER_PAGE; ++j, ++p2m_frame) { if (p2m_frame >= xd->xc_core.p2m_frames) break; if (p2m_mid[j] == p2m_missing) continue; idx = x86_xendump_page_index(p2m_mid[j]); if (idx == MFN_NOT_FOUND) goto err; xd->xc_core.p2m_frame_index_list[p2m_frame] = idx; } } machdep->last_ptbl_read = 0; machdep->last_pmd_read = 0; ret = TRUE; err: if (p2m_top) FREEBUF(p2m_top); return ret; } static void x86_debug_dump_page(FILE *ofp, char *page, char *name) { int i; ulong *up; fprintf(ofp, "%s\n", name); up = (ulong *)page; for (i = 0; i < 256; i++) { fprintf(ofp, "%016lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } /* * Find the page associate with the kvaddr, and read its contents * into the passed-in buffer. 
*/

/*
 *  Find the page associated with the kvaddr via the xendump xc_core
 *  mfn-to-page lookup (non-PAE, two-level walk), and read its contents
 *  into the passed-in buffer.  Returns pgbuf, or NULL on failure.
 */
static char *
x86_xendump_load_page(ulong kvaddr, char *pgbuf)
{
	ulong *entry;
	ulong *up;
	ulong mfn;

	if (machdep->flags & PAE)
		return x86_xendump_load_page_PAE(kvaddr, pgbuf);

	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();

	if (!xc_core_mfn_to_page(mfn, pgbuf)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	up = (ulong *)pgbuf;
	/* CONSISTENCY FIX: was a hard-coded 12; use PAGESHIFT() like the
	 * PAE variant below (identical value on x86) */
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();

	if (!xc_core_mfn_to_page(mfn, pgbuf)) {
		error(INFO, "cannot read/find page table page\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  PAE (three-level) version of the walk above: pgd -> pmd -> pte.
 */
static char *
x86_xendump_load_page_PAE(ulong kvaddr, char *pgbuf)
{
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!xc_core_mfn_to_page(mfn, pgbuf)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!xc_core_mfn_to_page(mfn, pgbuf)) {
		error(INFO, "cannot read/find pmd entry from pgd\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!xc_core_mfn_to_page(mfn, pgbuf)) {
		error(INFO, "cannot read/find page table page from pmd\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  Find the dumpfile page index associated with the kvaddr.
*/

/*
 *  Find the dumpfile page index associated with the kvaddr by walking
 *  the guest page tables (non-PAE, two-level), caching the page table
 *  page in machdep->ptbl.  Returns MFN_NOT_FOUND on failure.
 */
static int
x86_xendump_page_index(ulong kvaddr)
{
	int idx;
	ulong *entry;
	ulong *up;
	ulong mfn;

	if (machdep->flags & PAE)
		return x86_xendump_page_index_PAE(kvaddr);

	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();
	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_ptbl_read = mfn;

	up = (ulong *)machdep->ptbl;
	/* CONSISTENCY FIX: was a hard-coded 12; use PAGESHIFT() like the
	 * PAE variant below (identical value on x86) */
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();

	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
			kvaddr);

	return idx;
}

/*
 *  PAE version: pgd -> pmd -> pte, caching the pmd and pte pages.
 */
static int
x86_xendump_page_index_PAE(ulong kvaddr)
{
	int idx;
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_pmd_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pmd)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_pmd_read = mfn;

	up = (ulonglong *)machdep->pmd;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
		error(INFO, "cannot read/find pmd entry from pgd\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_ptbl_read = mfn;

	up = (ulonglong *)machdep->ptbl;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
			kvaddr);

	return idx;
}

/*
 *  Pull the esp from the cpu_user_regs struct in the header
 *  turn it into a task, and match it with the active_set.
 *  Unfortunately, the registers in the vcpu_guest_context
 *  are not necessarily those of the panic task, so for now
 *  let get_active_set_panic_task() get the right task.
*/

/*
 *  Intentionally disabled: always returns NO_TASK so that
 *  get_active_set_panic_task() determines the panic task (the registers
 *  in the vcpu_guest_context are not necessarily the panic task's).
 *  The original esp-matching logic is retained below under
 *  TO_BE_REVISITED.
 */
static ulong
x86_xendump_panic_task(struct xendump_data *xd)
{
	return NO_TASK;

#ifdef TO_BE_REVISITED
	int i;
	ulong esp;
	off_t offset;
	ulong task;

	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
	    INVALID_MEMBER(cpu_user_regs_esp))
		return NO_TASK;

	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_esp);

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		return NO_TASK;
	if (read(xd->xfd, &esp, sizeof(ulong)) != sizeof(ulong))
		return NO_TASK;

	if (IS_KVADDR(esp) && (task = stkptr_to_task(esp))) {

		for (i = 0; i < NR_CPUS; i++) {
			if (task == tt->active_set[i]) {
				if (CRASHDEBUG(0))
					error(INFO,
					    "x86_xendump_panic_task: esp: %lx -> task: %lx\n",
						esp, task);
				return task;
			}
		}

		/* BUGFIX: format has two %lx conversions but only esp was
		 * passed -- undefined behavior if this path is re-enabled */
		error(WARNING,
		    "x86_xendump_panic_task: esp: %lx -> task: %lx (not active)\n",
			esp, task);
	}

	return NO_TASK;
#endif
}

/*
 *  Because of an off-by-one vcpu bug in early xc_domain_dumpcore()
 *  instantiations, the registers in the vcpu_guest_context are not
 *  necessarily those of the panic task.  If not, the eip/esp will be
 *  in stop_this_cpu, as a result of the IP interrupt in panic(),
 *  but the trace is strange because it comes out of the hypervisor
 *  at least if the vcpu had been idle.
 */
/*
 * Retrieve starting eip/esp backtrace hooks for an x86 xendump.  If the
 * esp saved in the dumpfile's vcpu_guest_context resolves to the task
 * being backtraced, use the saved register pair; otherwise fall back to
 * the generic machdep stack-frame search.
 */
static void
x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ulong *esp)
{
	ulong task, xeip, xesp;
	off_t offset;

	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
	    INVALID_MEMBER(cpu_user_regs_eip) ||
	    INVALID_MEMBER(cpu_user_regs_esp))
		goto generic;

	/* Read the saved esp from the xc_core context area. */
	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_esp);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/* Read the saved eip from the xc_core context area. */
	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_eip);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xeip, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/* Only trust the saved registers if esp maps to the task at hand. */
	if (IS_KVADDR(xesp) && (task = stkptr_to_task(xesp)) &&
	    (task == bt->task)) {
		if (CRASHDEBUG(1))
			fprintf(xd->ofp,
			    "hooks from vcpu_guest_context: eip: %lx esp: %lx\n",
				xeip, xesp);
		*eip = xeip;
		*esp = xesp;
		return;
	}

generic:
	return machdep->get_stack_frame(bt, eip, esp);
}

/* for Xen Hypervisor analysis */

/*
 * A Xen hypervisor virtual address is anything at or above the
 * hypervisor's virtual base (PAE or non-PAE layout).
 */
static int
x86_xenhyper_is_kvaddr(ulong addr)
{
	if (machdep->flags & PAE) {
		return (addr >= HYPERVISOR_VIRT_START_PAE);
	}
	return (addr >= HYPERVISOR_VIRT_START);
}

/*
 * Return the stack base of a vcpu: read esp0 from the pcpu's init_tss
 * and round it down to the hypervisor stack size.  Fatal error on an
 * invalid vcpu/pcpu or an unreadable init_tss.
 */
static ulong
x86_get_stackbase_hyper(ulong task)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong init_tss;
	ulong esp, base;
	char *buf;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(task);
	if (!vcc) {
		error(FATAL, "invalid vcpu\n");
	}

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* init_tss may be a plain array or a per-cpu symbol. */
	if (symbol_exists("init_tss")) {
		init_tss = symbol_value("init_tss");
		init_tss += XEN_HYPER_SIZE(tss_struct) * pcpu;
	} else {
		init_tss = symbol_value("per_cpu__init_tss");
		init_tss = xen_hyper_per_cpu(init_tss, pcpu);
	}
	buf = GETBUF(XEN_HYPER_SIZE(tss_struct));
	if (!readmem(init_tss, KVADDR, buf,
		XEN_HYPER_SIZE(tss_struct), "init_tss", RETURN_ON_ERROR)) {
		error(FATAL, "cannot read init_tss.\n");
	}
	esp = ULONG(buf + XEN_HYPER_OFFSET(tss_struct_esp0));
	FREEBUF(buf);
	/* Round down to the stack-size boundary. */
	base = esp & (~(STACKSIZE() - 1));

	return base;
}

/* Stack top is one stack size above the base. */
static ulong
x86_get_stacktop_hyper(ulong task)
{
	return x86_get_stackbase_hyper(task) + STACKSIZE();
}

/*
 * Get the starting pc/sp hooks for backtracing a vcpu on the Xen
 * hypervisor: pull esp/eip from the pcpu's saved ELF note registers,
 * sanity-checking both against the vcpu's stack range and kernel text.
 */
static void
x86_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong *regs;
	ulong esp, eip;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
	if (!vcc) {
		error(FATAL, "invalid vcpu\n");
	}

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* For "bt -t"-style requests, just start from the stack base. */
	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
		if (spp)
			*spp = x86_get_stackbase_hyper(bt->task);
		if (pcp)
			*pcp = 0;
		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
		return;
	}

	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
	esp = XEN_HYPER_X86_NOTE_ESP(regs);
	eip = XEN_HYPER_X86_NOTE_EIP(regs);

	/* Fall back to the stack base if esp is outside the vcpu's stack. */
	if (spp) {
		if (esp < x86_get_stackbase_hyper(bt->task) ||
		    esp >= x86_get_stacktop_hyper(bt->task))
			*spp = x86_get_stackbase_hyper(bt->task);
		else
			*spp = esp;
	}
	/* Only report eip if it lands in kernel text. */
	if (pcp) {
		if (is_kernel_text(eip))
			*pcp = eip;
		else
			*pcp = 0;
	}
}

/*
 * Machine-dependent initialization for Xen hypervisor analysis,
 * called in stages (PRE_SYMTAB, PRE_GDB, POST_GDB, POST_INIT).
 */
static void
x86_init_hyper(int when)
{
	switch (when) {
	case PRE_SYMTAB:
		machdep->verify_symbol = x86_verify_symbol;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 4; /* ODA: magic num */
		/* Scratch pages for caching page-table reads. */
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->machspec = &x86_machine_specific; /* some
members used */ break; case PRE_GDB: if (symbol_exists("create_pae_xen_mappings") || symbol_exists("idle_pg_table_l3")) { machdep->flags |= PAE; PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL; PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL; PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL; machdep->kvtop = x86_kvtop_PAE; machdep->kvbase = HYPERVISOR_VIRT_START_PAE; } else { PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL; PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL; PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL; machdep->kvtop = x86_kvtop; free(machdep->pmd); machdep->pmd = machdep->pgd; machdep->kvbase = HYPERVISOR_VIRT_START; } machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->identity_map_base = DIRECTMAP_VIRT_START; machdep->is_kvaddr = x86_xenhyper_is_kvaddr; machdep->eframe_search = x86_eframe_search; machdep->back_trace = x86_back_trace_cmd; machdep->processor_speed = x86_processor_speed; /* ODA: check */ machdep->dump_irq = generic_dump_irq; /* ODA: check */ machdep->get_stack_frame = x86_get_stack_frame_hyper; machdep->get_stackbase = x86_get_stackbase_hyper; machdep->get_stacktop = x86_get_stacktop_hyper; machdep->translate_pte = x86_translate_pte; machdep->memory_size = xen_hyper_x86_memory_size; machdep->dis_filter = x86_dis_filter; // machdep->cmd_mach = x86_cmd_mach; /* ODA: check */ machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; // machdep->line_number_hooks = x86_line_number_hooks; /* ODA: check */ machdep->flags |= FRAMESIZE_DEBUG; /* ODA: check */ machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->clear_machdep_cache = x86_clear_machdep_cache; /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; break; case POST_GDB: #if 0 /* ODA: need this ? 
*/ if (x86_omit_frame_pointer()) { machdep->flags |= OMIT_FRAME_PTR; #endif XEN_HYPER_STRUCT_SIZE_INIT(cpu_time, "cpu_time"); XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); XEN_HYPER_STRUCT_SIZE_INIT(tss_struct, "tss_struct"); XEN_HYPER_MEMBER_OFFSET_INIT(tss_struct_esp0, "tss_struct", "esp0"); XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_local_tsc_stamp, "cpu_time", "local_tsc_stamp"); XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_local_stamp, "cpu_time", "stime_local_stamp"); XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_master_stamp, "cpu_time", "stime_master_stamp"); XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_tsc_scale, "cpu_time", "tsc_scale"); XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_calibration_timer, "cpu_time", "calibration_timer"); if (symbol_exists("cpu_data")) { xht->cpu_data_address = symbol_value("cpu_data"); } /* KAK Can this be calculated? */ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: break; } } #endif /* X86 */ crash-7.1.4/kvmdump.h0000664000000000000000000000475612634305150013172 0ustar rootroot/* * kvmdump.h * * Copyright (C) 2009, 2010 David Anderson * Copyright (C) 2009, 2010 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ struct mapinfo_trailer { uint64_t map_start_offset; uint64_t phys_base; uint32_t cpu_version_id; uint32_t ram_version_id; uint64_t checksum; uint64_t magic; }; struct register_set { uint32_t cs; uint32_t ss; uint32_t ds; uint32_t es; uint32_t fs; uint32_t gs; uint64_t ip; uint64_t flags; uint64_t regs[16]; }; #define REGS_MAGIC (0xfeedbeefdeadbabeULL) #define MAPFILE_MAGIC (0xfeedbabedeadbeefULL) #define CHKSUM_SIZE (4096) #define KVMDUMP_CACHED_PAGES 32 struct kvmdump_data { ulong flags; FILE *ofp; FILE *vmp; int mapfd; int vmfd; struct mapinfo_trailer mapinfo; /* page cache */ struct kvm_page_cache_hdr { uint64_t paddr; char *bufptr; } page_cache[KVMDUMP_CACHED_PAGES]; union { char *curbufptr; unsigned char compressed; } un; int evict_index; ulong accesses; ulong hit_count; ulong compresses; uint64_t kvbase; ulong *debug; uint32_t cpu_devices; struct register_set *registers; uint64_t iohole; }; #define TMPFILE (0x2) #define MAPFILE (0x4) #define MAPFILE_FOUND (0x8) #define MAPFILE_APPENDED (0x10) #define NO_PHYS_BASE (0x20) #define KVMHOST_32 (0x40) #define KVMHOST_64 (0x80) #define REGS_FROM_DUMPFILE (0x100) #define REGS_FROM_MAPFILE (0x200) #define REGS_NOT_AVAIL (0x400) extern struct kvmdump_data *kvm; #undef dprintf #define dprintf(x...) do { if (*(kvm->debug)) fprintf(kvm->ofp, x); } while (0) int store_mapfile_offset(uint64_t, off_t *); int load_mapfile_offset(uint64_t, off_t *); struct qemu_device_x86; int kvmdump_regs_store(uint32_t, struct qemu_device_x86 *); #define KVMDUMP_REGS_START (NR_CPUS+1) #define KVMDUMP_REGS_END (NR_CPUS+2) #define UPPER_32_BITS (0xffffffff00000000ULL) enum CPU_REG { R_EAX, R_ECX, R_EDX, R_EBX, R_ESP, R_EBP, R_ESI, R_EDI, R_GP_MAX, }; crash-7.1.4/makedumpfile.c0000664000000000000000000002206612634305150014137 0ustar rootroot/* * makedumpfile.c * * This code is for reading a dumpfile ganarated by makedumpfile command. * * Copyright (C) 2011 NEC Soft, Ltd. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Ken'ichi Ohmichi */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include "makedumpfile.h" #include static void flattened_format_get_osrelease(char *); int flattened_format = 0; struct flat_data { int64_t off_flattened; int64_t off_rearranged; /* offset which will be rearranged. */ int64_t buf_size; }; struct all_flat_data { unsigned long long num_array; struct flat_data *array; size_t file_size; }; struct all_flat_data afd; struct makedumpfile_header fh_save; static int is_bigendian(void) { int i = 0x12345678; if (*(char *)&i == 0x12) return TRUE; else return FALSE; } static unsigned long long store_flat_data_array(char *file, struct flat_data **fda) { int result = FALSE, fd; int64_t offset_fdh; int64_t offset_report = 0; unsigned long long num_allocated = 0; unsigned long long num_stored = 0; unsigned long long sort_idx; unsigned long long size_allocated; struct flat_data *ptr = NULL, *cur, *new; struct makedumpfile_data_header fdh; struct stat64 stat; ulonglong pct, last_pct; char buf[BUFSIZE]; ssize_t bytes_read; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "unable to open dump file %s\n", file); return -1; } if (lseek(fd, MAX_SIZE_MDF_HEADER, SEEK_SET) < 0) { error(INFO, "%s: seek error (flat format)\n", file); close(fd); return -1; } if (stat64(file, &stat) < 0) { error(INFO, "cannot stat64 %s\n", file); return -1; } please_wait("sorting flat format data"); pct = last_pct = 0; while (1) { if (num_allocated <= num_stored) { num_allocated 
+= 100; size_allocated = sizeof(struct flat_data) * num_allocated; new = realloc(ptr, size_allocated); if (new == NULL) { error(INFO, "unable to realloc flat_data structures\n"); break; } ptr = new; } offset_fdh = lseek(fd, 0x0, SEEK_CUR); if ((bytes_read = read(fd, &fdh, sizeof(fdh))) != sizeof(fdh)) { if (bytes_read >= 0) error(INFO, "read error: %s (flat format): truncated/incomplete\n", file); else error(INFO, "read error: %s (flat format)\n", file); break; } if (!is_bigendian()){ fdh.offset = bswap_64(fdh.offset); fdh.buf_size = bswap_64(fdh.buf_size); } if (fdh.offset == END_FLAG_FLAT_HEADER) { result = TRUE; break; } cur = ptr + num_stored; sort_idx = num_stored; while (sort_idx) { new = ptr + --sort_idx; if (new->off_rearranged >= fdh.offset) { cur->off_flattened = new->off_flattened; cur->off_rearranged = new->off_rearranged; cur->buf_size = new->buf_size; cur = new; } else { if (CRASHDEBUG(1) && sort_idx + 1 != num_stored) { fprintf(fp, "makedumpfile: Moved from %lld to %lld\n", num_stored, sort_idx + 1); } break; } } cur->off_flattened = offset_fdh + sizeof(fdh); cur->off_rearranged = fdh.offset; cur->buf_size = fdh.buf_size; num_stored++; pct = (offset_fdh * 100ULL) / stat.st_size; if (pct > last_pct) { sprintf(buf, "sorting flat format data: %lld%%", (ulonglong)pct); please_wait(buf); if (CRASHDEBUG(1)) fprintf(fp, "\n"); last_pct = pct; } if (CRASHDEBUG(1) && (fdh.offset >> 30) > (offset_report >> 30)) { fprintf(fp, "makedumpfile: At %lld GiB\n", (ulonglong)(fdh.offset >> 30)); offset_report = fdh.offset; } /* seek for next makedumpfile_data_header. 
*/ if (lseek(fd, fdh.buf_size, SEEK_CUR) < 0) { error(INFO, "%s: seek error (flat format)\n", file); break; } } please_wait_done(); close(fd); if (result == FALSE) { free(ptr); return -1; } *fda = ptr; return num_stored; } static int read_all_makedumpfile_data_header(char *file) { unsigned long long num; struct flat_data *fda = NULL; long long retval; retval = num = store_flat_data_array(file, &fda); if (retval < 0) return FALSE; afd.num_array = num; afd.array = fda; return TRUE; } void check_flattened_format(char *file) { int fd, get_osrelease; struct stat stat; struct makedumpfile_header fh; if (pc->flags2 & GET_OSRELEASE) { get_osrelease = TRUE; pc->flags2 &= ~GET_OSRELEASE; } else get_osrelease = FALSE; if (flattened_format) goto out; if (file_exists(file, &stat) && S_ISCHR(stat.st_mode)) goto out; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "unable to open dump file %s\n", file); goto out; } if (read(fd, &fh, sizeof(fh)) < 0) { error(INFO, "unable to read dump file %s\n", file); close(fd); goto out; } close(fd); if (!is_bigendian()){ fh.type = bswap_64(fh.type); fh.version = bswap_64(fh.version); } if ((strncmp(fh.signature, MAKEDUMPFILE_SIGNATURE, sizeof(MAKEDUMPFILE_SIGNATURE)) != 0) || (fh.type != TYPE_FLAT_HEADER)) goto out; if (get_osrelease) { flattened_format_get_osrelease(file); return; } if (!read_all_makedumpfile_data_header(file)) return; if (CRASHDEBUG(1)) fprintf(fp, "%s: FLAT\n\n", file); fh_save = fh; flattened_format = TRUE; return; out: if (get_osrelease) pc->flags2 |= GET_OSRELEASE; } static int read_raw_dump_file(int fd, off_t offset, void *buf, size_t size) { if (lseek(fd, offset, SEEK_SET) < 0) { if (CRASHDEBUG(1)) error(INFO, "read_raw_dump_file: lseek error (flat format)\n"); return FALSE; } if (read(fd, buf, size) < size) { if (CRASHDEBUG(1)) error(INFO, "read_raw_dump_file: read error (flat format)\n"); return FALSE; } return TRUE; } int read_flattened_format(int fd, off_t offset, void *buf, size_t size) { unsigned long long 
index, index_start, index_end;
	int64_t range_start, range_end;
	size_t read_size, remain_size;
	off_t offset_read;
	struct flat_data *ptr;

	/*
	 * Binary search over afd.array (sorted by off_rearranged) for the
	 * segment containing "offset".
	 */
	index_start = 0;
	index_end = afd.num_array;
	while (1) {
		index = (index_start + index_end) / 2;
		ptr = afd.array + index;
		/* Rearranged-offset range covered by this segment. */
		range_start = ptr->off_rearranged;
		range_end = ptr->off_rearranged + ptr->buf_size;

		if ((range_start <= offset) && (offset < range_end)) {
			/* Found a corresponding array. */
			offset_read = (offset - range_start) + ptr->off_flattened;
			if (offset + size <= range_end) {
				/* Entire request lies within this segment. */
				if (!read_raw_dump_file(fd, offset_read, buf, size))
					return FALSE;
				break;
			}
			/* Search other array corresponding to remaining data. */
			read_size = range_end - offset;
			remain_size = size - read_size;
			if (!read_raw_dump_file(fd, offset_read, buf, read_size))
				return FALSE;
			/* Recurse for the portion past this segment. */
			if (!read_flattened_format(fd, offset + read_size,
			    (char *)buf + read_size, remain_size))
				return FALSE;
			break;
		} else if ((index == index_start) &&
		    (index_start + 1 == index_end)) {
			/*
			 * Try to read not-written area. That is a common case,
			 * because the area might be skipped by lseek().
			 * This area should be the data filled with zero.
*/ ptr = afd.array + index_end; if (offset + size <= ptr->off_rearranged) { memset(buf, 0x0, size); } else { read_size = ptr->off_rearranged - offset; remain_size = size - read_size; memset(buf, 0x0, read_size); if (!read_flattened_format(fd, offset + read_size, (char *)buf + read_size, remain_size)) return FALSE; } break; } else if (offset < ptr->off_rearranged) index_end = index; else index_start = index; } return TRUE; } int is_flattened_format(char *file) { check_flattened_format(file); return flattened_format; } void dump_flat_header(FILE *ofp) { int i; fprintf(ofp, "makedumpfile header:\n"); fprintf(ofp, " signature: \""); for (i = 0; i < SIG_LEN_MDF; i++) { if (!fh_save.signature[i]) break; fprintf(ofp, "%c", fh_save.signature[i]); } fprintf(ofp, "\"\n"); fprintf(ofp, " type: %llx\n", (ulonglong)fh_save.type); fprintf(ofp, " version: %llx\n", (ulonglong)fh_save.version); fprintf(ofp, " all_flat_data:\n"); fprintf(ofp, " num_array: %lld\n", (ulonglong)afd.num_array); fprintf(ofp, " array: %lx\n", (ulong)afd.array); fprintf(ofp, " file_size: %ld\n\n", (ulong)afd.file_size); } static void flattened_format_get_osrelease(char *file) { int c; FILE *pipe; char buf[BUFSIZE], *p1, *p2; c = strlen("OSRELEASE="); sprintf(buf, "/usr/bin/strings -n %d %s", c, file); if ((pipe = popen(buf, "r")) == NULL) return; for (c = 0; (c < 100) && fgets(buf, BUFSIZE-1, pipe); c++) { if ((p1 = strstr(buf, "OSRELEASE="))) { p2 = strstr(p1, "="); fprintf(fp, "%s", p2+1); flattened_format = TRUE; pc->flags2 |= GET_OSRELEASE; } } fclose(pipe); } crash-7.1.4/xen_hyper_global_data.c0000664000000000000000000003305612634305150016007 0ustar rootroot/* * xen_hyper_global_data.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. 
 *
 * Xencrash is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Xencrash is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xencrash; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "defs.h"

#ifdef XEN_HYPERVISOR_ARCH
#include "xen_hyper_defs.h"

/*
 * Global data for Xen hypervisor.
 *
 * Each table below is a zero-initialized singleton paired with a
 * short-named global pointer that the rest of the hypervisor-analysis
 * code uses to reference it.
 */
/* machine-dependent hooks for hypervisor analysis */
struct xen_hyper_machdep_table xen_hyper_machdep_table = { 0 };
struct xen_hyper_machdep_table *xhmachdep = &xen_hyper_machdep_table;

/* top-level hypervisor state */
struct xen_hyper_table xen_hyper_table = { 0 };
struct xen_hyper_table *xht = &xen_hyper_table;

/* per-cpu crash note / dumpinfo contexts */
struct xen_hyper_dumpinfo_table xen_hyper_dumpinfo_table = { 0 };
struct xen_hyper_dumpinfo_table *xhdit = &xen_hyper_dumpinfo_table;

/* domain contexts */
struct xen_hyper_domain_table xen_hyper_domain_table = { 0 };
struct xen_hyper_domain_table *xhdt = &xen_hyper_domain_table;

/* virtual cpu contexts */
struct xen_hyper_vcpu_table xen_hyper_vcpu_table = { 0 };
struct xen_hyper_vcpu_table *xhvct = &xen_hyper_vcpu_table;

/* physical cpu contexts */
struct xen_hyper_pcpu_table xen_hyper_pcpu_table = { 0 };
struct xen_hyper_pcpu_table *xhpct = &xen_hyper_pcpu_table;

/* scheduler state */
struct xen_hyper_sched_table xen_hyper_sched_table = { 0 };
struct xen_hyper_sched_table *xhscht = &xen_hyper_sched_table;

/* hypervisor symbol table data */
struct xen_hyper_symbol_table_data xen_hyper_symbol_table_data = { 0 };
struct xen_hyper_symbol_table_data *xhsymt = &xen_hyper_symbol_table_data;

/*
 * The following commands are for Xen hypervisor.
*/ struct command_table_entry xen_hyper_command_table[] = { {"*", cmd_pointer, help_pointer, 0}, {"alias", cmd_alias, help_alias, 0}, {"ascii", cmd_ascii, help_ascii, 0}, {"bt", cmd_bt, help_bt, 0}, {"dis", cmd_dis, help_dis, 0}, {"domain", xen_hyper_cmd_domain, xen_hyper_help_domain, REFRESH_TASK_TABLE}, {"doms", xen_hyper_cmd_doms, xen_hyper_help_doms, REFRESH_TASK_TABLE}, #if defined(X86) || defined(X86_64) {"dumpinfo",xen_hyper_cmd_dumpinfo, xen_hyper_help_dumpinfo,0}, #endif {"eval", cmd_eval, help_eval, 0}, {"exit", cmd_quit, help_exit, 0}, {"extend", cmd_extend, help_extend, 0}, {"gdb", cmd_gdb, help_gdb, 0}, {"help", xen_hyper_cmd_help, help_help, 0}, {"list", cmd_list, help__list, 0}, {"log", xen_hyper_cmd_log, xen_hyper_help_log, 0}, {"p", cmd_p, help_p, 0}, {"pcpus", xen_hyper_cmd_pcpus, xen_hyper_help_pcpus, 0}, {"pte", cmd_pte, help_pte, 0}, {"q", cmd_quit, help_quit, 0}, {"rd", cmd_rd, help_rd, 0}, {"repeat", cmd_repeat, help_repeat, 0}, {"sched", xen_hyper_cmd_sched, xen_hyper_help_sched, 0}, {"search", cmd_search, help_search, 0}, {"set", cmd_set, help_set, 0}, {"struct", cmd_struct, help_struct, 0}, {"sym", cmd_sym, help_sym, 0}, {"sys", xen_hyper_cmd_sys, xen_hyper_help_sys, 0}, {"test", cmd_test, NULL, HIDDEN_COMMAND}, {"union", cmd_union, help_union, 0}, {"vcpu", xen_hyper_cmd_vcpu, xen_hyper_help_vcpu, REFRESH_TASK_TABLE}, {"vcpus", xen_hyper_cmd_vcpus, xen_hyper_help_vcpus, REFRESH_TASK_TABLE}, {"whatis", cmd_whatis, help_whatis, 0}, {"wr", cmd_wr, help_wr, 0}, {(char *)NULL} }; /* * */ struct xen_hyper_offset_table xen_hyper_offset_table = { 0 }; struct xen_hyper_size_table xen_hyper_size_table = { 0 }; /* * help data */ char *xen_hyper_help_domain[] = { "domain", "display contents of domain struct", "[domain-id | domainp] ...", " This command displays contents of domain struct for selected, or all, domains", " domain-id a domain id.", " domainp a domain pointer.", NULL }; char *xen_hyper_help_doms[] = { "doms", "display domain status 
information", "[domain-id | domainp] ...", " This command displays domain status for selected, or all, domains" , " domain-id a domain id.", " domainp a domain pointer.", " ", " 1. the DOMAIN-ID.", " 2. the struct domain pointer.", " 3. the domain state", " (SF:fully shut down, SH:shutting down, DY:dying,", " CP:pause by controller software, PO:polling event channels,", " PA:pause by the hypervisor, RU:running).", " 4. the TYPE of domain", " (O:dom_io, X:dom_xen, I:idle domain, 0:domain 0, U:domain U).", " 5. displays max_pages member of domain.", " 6. displays tot_pages member of domain.", " 7. a number of vcpu that domain is assigned.", " 8. the shared_info pointer of domain.", " 9. frame containing list of mfns containing list of mfns" , " containing p2m.", " ", " The active domain on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing domain on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the domain status of all:\n", " %s> doms", " DID DOMAIN ST T MAXPAGE TOTPAGE VCPU SHARED_I P2M_MFN", " 32753 ffbf8080 RU O 0 0 0 0 ----", " 32754 ffbfa080 RU X 0 0 0 0 ----", " 32767 ffbfc080 RU I 0 0 2 0 ----", " >* 0 ff198080 RU 0 ffffffff 32900 2 ff194000 18d0", " 4 ffbee080 RU U 4000 4000 2 ff18d000 3eb92", " 5 ff186080 RU U 4000 4000 2 ff184000 298d3", " %s>", NULL }; char *xen_hyper_help_dumpinfo[] = { "dumpinfo", "display Xen dump information", "[-t | -r] [pcpu-id | enotep] ...", " This command displays Xen dump information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " enotep a ELF Note pointer.", " -t display time information.", " -r display register information.", NULL }; char *xen_hyper_help_log[] = { "log", "dump system message buffer", " ", " This command dumps the xen conring contents in chronological order." 
, " ", "EXAMPLES", " Dump the Xen message buffer:\n", " %s> log", " __ __ _____ ___ _ _ _", " \\ \\/ /___ _ __ |___ / / _ \\ _ _ _ __ ___| |_ __ _| |__ | | ___", " \\ // _ \\ '_ \\ |_ \\| | | |__| | | | '_ \\/ __| __/ _` | '_ \\| |/ _ \\", " / \\ __/ | | | ___) | |_| |__| |_| | | | \\__ \\ || (_| | |_) | | __/", " /_/\\_\\___|_| |_| |____(_)___/ \\__,_|_| |_|___/\\__\\__,_|_.__/|_|\\___|", " ", " http://www.cl.cam.ac.uk/netos/xen", " University of Cambridge Computer Laboratory", " ", " Xen version 3.0-unstable (damm@) (gcc version 3.4.6 (Gentoo 3.4.6-r1, ssp-3.4.5-1.0,", " pie-8.7.9)) Wed Dec 6 17:34:32 JST 2006", " Latest ChangeSet: unavailable", " ", " (XEN) Console output is synchronous.", " (XEN) Command line: 12733-i386-pae/xen.gz console=com1 sync_console conswitch=bb com1", " =115200,8n1,0x3f8 dom0_mem=480000 crashkernel=64M@32M", " (XEN) Physical RAM map:", " (XEN) 0000000000000000 - 0000000000098000 (usable)", " (XEN) 0000000000098000 - 00000000000a0000 (reserved)", " (XEN) 00000000000f0000 - 0000000000100000 (reserved)", " (XEN) 0000000000100000 - 000000003f7f0000 (usable)", " (XEN) 000000003f7f0000 - 000000003f7f3000 (ACPI NVS)", " (XEN) 000000003f7f3000 - 000000003f800000 (ACPI data)", " (XEN) 00000000e0000000 - 00000000f0000000 (reserved)", " (XEN) 00000000fec00000 - 0000000100000000 (reserved)", " (XEN) Kdump: 64MB (65536kB) at 0x2000000", " (XEN) System RAM: 1015MB (1039904kB)", " (XEN) ACPI: RSDP (v000 XPC ) @ 0x000f9250", " ...", NULL }; char *xen_hyper_help_pcpus[] = { "pcpus", "display physical cpu information", "[-r][-t] [pcpu-id | pcpup] ...", " This command displays physical cpu information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " pcpup a physical cpu pointer.", " cur-vcpu a current virtual cpu pointer.", " -r display register information.", " -t display init_tss information.", " ", " The crashing physical cpu will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the physical cpu 
status of all:\n", " %s> pcpus", " PCID PCPU CUR-VCPU", " 0 ff1a3fb4 ffbf9080", " * 1 ff1dbfb4 ffbf8080", " %s>", " ", " Show the physical cpu status of all with register information:\n", " %s> pcpus -r", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " Register information:", " struct cpu_user_regs {", " ebx = 0x0,", " ecx = 0xdcf4bed8,", " edx = 0xc0326887,", " esi = 0x63,", " edi = 0x0,", " ebp = 0xdcf4bee0,", " eax = 0x25,", " error_code = 0x6,", " entry_vector = 0xe,", " eip = 0xc01014a7,", " cs = 0x61,", " saved_upcall_mask = 0x0,", " _pad0 = 0x0,", " eflags = 0x202,", " esp = 0xdcf4bed0,", " ss = 0x69,", " _pad1 = 0x0,", " es = 0x7b,", " _pad2 = 0x0,", " ds = 0x7b,", " _pad3 = 0x0,", " fs = 0x0,", " _pad4 = 0x0,", " gs = 0x0,", " _pad5 = 0x0", " }", " ", " Show the physical cpu status of all with init_tss information:\n", " %s> pcpus -t", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " init_tss information:", " struct tss_struct {", " back_link = 0x0,", " __blh = 0x0,", " esp0 = 0xff1b7fe8,", " ss0 = 0xe010,", " __ss0h = 0x0,", " esp1 = 0xdcf4bff8,", " ss1 = 0x69,", " __ss1h = 0x0,", " esp2 = 0x0,", " ss2 = 0x0,", " __ss2h = 0x0,", " __cr3 = 0x0,", " eip = 0x0,", " eflags = 0x0,", " eax = 0x0,", " ecx = 0x0,", " edx = 0x0,", " ebx = 0x0,", " esp = 0x0,", " ebp = 0x0,", " esi = 0x0,", " edi = 0x0,", " es = 0x0,", " __esh = 0x0,", " cs = 0x0,", " __csh = 0x0,", " ss = 0x0,", " __ssh = 0x0,", " ds = 0x0,", " __dsh = 0x0,", " fs = 0x0,", " __fsh = 0x0,", " gs = 0x0,", " __gsh = 0x0,", " ldt = 0x0,", " __ldth = 0x0,", " trace = 0x0,", " bitmap = 0x8000,", " __cacheline_filler = \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"", " }", NULL }; char *xen_hyper_help_sched[] = { "pcpus", "display scheduler information", "[-v] [pcpu-id] ...", " This command displays scheduler information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " -v display verbosely scheduler information.", 
" ", NULL }; char *xen_hyper_help_sys[] = { "sys", "system data", "[-c [name|number]] config", " This command displays system-specific data. If no arguments are entered,\n" " the same system data shown during %s invocation is shown.\n", "\nEXAMPLES", " Display essential system information:\n", " %s> sys", " DEBUG KERNEL: xen-syms", " DUMPFILE: vmcore", " CPUS: 2", " DOMAINS: 2", " MACHINE: Pentium III (Coppermine) (866 Mhz)", " MEMORY: 2 GB", " %s>", NULL }; char *xen_hyper_help_vcpu[] = { "vcpu", "display contents of vcpu struct", "[vcpup] ...", " This command displays contents of vcpu struct for selected, or all, vcpus", " vcpu-id a virtual cpu id.", " vcpup a virtual cpu pointer.", NULL }; char *xen_hyper_help_vcpus[] = { "vcpus", "display vcpu status information", "[-i domain-id vcpu-id | vcpup] ...", " This command displays vcpu status for selected, or all, vcpus" , " domain-id a domain id.", " vcpu-id a VCPU-ID.", " vcpup a hexadecimal struct vcpu pointer.", " -i specify vcpu id as an argument.", " ", " 1. the VCPU-ID.", " 2. the physical CPU-ID.", " 3. the struct vcpu pointer.", " 4. the vcpu state (RU, BL, OF).", " 5. the TYPE of domain that vcpu is assigned(I, 0, G).", " 6. the DOMAIN-ID of domain that vcpu is assigned.", " 7. 
the struct domain pointer of domain that vcpu is assigned.", " ", " The active vcpu on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing vcpu on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the vcpu status of all:\n", " %s> vcpus", " VCID PCID VCPU ST T DOMID DOMAIN", " 0 0 ffbfe080 RU I 32767 ffbfc080", " 1 1 ff1df080 RU I 32767 ffbfc080", " >* 0 0 ff195180 RU 0 0 ff198080", " > 1 1 ff190080 BL 0 0 ff198080", " 0 1 ff18a080 BL G 4 ffbee080", " 1 0 ff189080 BL G 4 ffbee080", " 0 1 ff1f3080 BL G 5 ff186080", " 1 0 ff1f2080 BL G 5 ff186080", " %s>", NULL }; struct task_context fake_tc = { 0 }; #endif crash-7.1.4/help.c0000775000000000000000000135732012634305150012434 0ustar rootroot/* help.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015 David Anderson * Copyright (C) 2002-2015 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" static void reshuffle_cmdlist(void); static int sort_command_name(const void *, const void *); static void display_commands(void); static void display_copying_info(void); static void display_warranty_info(void); static void display_output_info(void); static void display_input_info(void); static void display_README(void); static char *gnu_public_license[]; static char *gnu_public_license_v3[]; static char *version_info[]; static char *output_info[]; static char *input_info[]; static char *README[]; static void dump_registers(void); #define GPLv2 2 #define GPLv3 3 #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) static int GPL_version = GPLv2; #else static int GPL_version = GPLv3; #endif static char *program_usage_info[] = { "", "USAGE:", "", " crash [OPTION]... NAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)", " crash [OPTION]... [NAMELIST] (live system form)", "", "OPTIONS:", "", " NAMELIST", " This is a pathname to an uncompressed kernel image (a vmlinux", " file), or a Xen hypervisor image (a xen-syms file) which has", " been compiled with the \"-g\" option. If using the dumpfile form,", " a vmlinux file may be compressed in either gzip or bzip2 formats.", "", " MEMORY-IMAGE", " A kernel core dump file created by the netdump, diskdump, LKCD", " kdump, xendump or kvmdump facilities.", "", " If a MEMORY-IMAGE argument is not entered, the session will be", " invoked on the live system, which typically requires root privileges", " because of the device file used to access system RAM. By default, ", " /dev/crash will be used if it exists. If it does not exist, then ", " /dev/mem will be used; but if the kernel has been configured with ", " CONFIG_STRICT_DEVMEM, then /proc/kcore will be used. It is permissible", " to explicitly enter /dev/crash, /dev/mem or /proc/kcore.", "", " An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile", " is a raw RAM dumpfile that has no header information describing the file", " contents. 
Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered,", " with each dumpfile containing a contiguous block of RAM, where the ADDRESS", " value is the physical start address of the block expressed in hexadecimal.", " The physical address value(s) will be used to create a temporary ELF header", " in /var/tmp, which will only exist during the crash session.", "", " mapfile", " If the NAMELIST file is not the same kernel that is running", " (live system form), or the kernel that was running when the system", " crashed (dumpfile form), then the System.map file of the original ", " kernel should be entered on the command line.", "", " -h [option]", " --help [option]", " Without an option argument, display a crash usage help message.", " If the option argument is a crash command name, the help page", " for that command is displayed. If it is the string \"input\", a", " page describing the various crash command line input options is", " displayed. If it is the string \"output\", a page describing command", " line output options is displayed. If it is the string \"all\", then", " all of the possible help messages are displayed. After the help", " message is displayed, crash exits.", "", " -s ", " Silently proceed directly to the \"crash>\" prompt without displaying", " any version, GPL, or crash initialization data during startup, and by", " default, runtime command output is not passed to any scrolling command.", "", " -i file", " Execute the command(s) contained in \"file\" prior to displaying ", " the \"crash>\" prompt for interactive user input.", "", " -d num ", " Set the internal debug level. The higher the number, the more", " debugging data will be printed when crash initializes and runs.", "", " -S ", " Use /boot/System.map as the mapfile.", "", " -e vi | emacs", " Set the readline(3) command line editing mode to \"vi\" or \"emacs\". 
", " The default editing mode is \"vi\".", "", " -f ", " Force the usage of a compressed vmlinux file if its original", " name does not start with \"vmlinux\".", "", " -k ", " Indicate that the NAMELIST file is an LKCD \"Kerntypes\" debuginfo file.", "", " -g [namelist]", " Determine if a vmlinux or xen-syms namelist file contains debugging data.", "", " -t ", " Display the system-crash timestamp and exit.", "", " -L ", " Attempt to lock all of its virtual address space into memory by", " calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization.", " If the system call fails, an error message will be displayed,", " but the session continues.", "", " -c tty-device", " Open the tty-device as the console used for debug messages.", "", " -p page-size", " If a processor's page size cannot be determined by the dumpfile, ", " and the processor default cannot be used, use page-size.", "", " -o filename", " Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles,", " specifies a filename of a new ELF vmcore that will be created and used", " as the dumpfile. It will be saved to allow future use as a standalone", " vmcore, replacing the original raw RAM dumpfile.", "", " -m option=value", " --machdep option=value", " Pass an option and value pair to machine-dependent code. 
These", " architecture-specific option/pairs should only be required in", " very rare circumstances:", "", " X86_64:", " phys_base=", " irq_eframe_link=", " max_physmem_bits=", " vm=orig (pre-2.6.11 virtual memory address ranges)", " vm=2.6.11 (2.6.11 and later virtual memory address ranges)", " vm=xen (Xen kernel virtual memory address ranges)", " vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges)", " PPC64:", " vm=orig", " vm=2.6.14 (4-level page tables)", " IA64:", " phys_start=", " init_stack_size=", " vm=4l (4-level page tables)", " ARM:", " phys_base=", " ARM64:", " phys_offset=", "", " -x ", " Automatically load extension modules from a particular directory.", " The directory is determined by the following order of precedence:", "", " (1) the directory specified in the CRASH_EXTENSIONS shell ", " environment variable", " (2) /usr/lib64/crash/extensions (64-bit architectures)", " (3) /usr/lib/crash/extensions (32-bit architectures)", " (4) the ./extensions subdirectory of the current directory", "", " --active", " Track only the active task on each cpu.", "", " --buildinfo", " Display the crash binary's build date, the user ID of the builder,", " the hostname of the machine where the build was done, the target", " architecture, the version number, and the compiler version.", "", " --memory_module modname", " Use the modname as an alternative kernel module to the crash.ko", " module that creates the /dev/crash device.", "", " --memory_device device", " Use device as an alternative device to the /dev/crash, /dev/mem", " or /proc/kcore devices.", "", " --log dumpfile", " Dump the contents of the kernel log buffer. 
A kernel namelist", " argument is not necessary, but the dumpfile must contain the", " VMCOREINFO data taken from the original /proc/vmcore ELF header.", "", " --no_kallsyms", " Do not use kallsyms-generated symbol information contained within", " kernel module object files.", "", " --no_modules", " Do not access or display any kernel module related information.", "", " --no_ikconfig", " Do not attempt to read configuration data that was built into", " kernels configured with CONFIG_IKCONFIG.", "", " --no_data_debug", " Do not verify the validity of all structure member offsets and", " structure sizes that it uses.", "", " --no_kmem_cache", " Do not initialize the kernel's slab cache infrastructure, and", " commands that use kmem_cache-related data will not work.", "", " --no_elf_notes", " Do not use the registers from the ELF NT_PRSTATUS notes saved", " in a compressed kdump header for backtraces.", "", " --kmem_cache_delay", " Delay the initialization of the kernel's slab cache infrastructure", " until it is required by a run-time command.", "", " --readnow", " Pass this flag to the embedded gdb module, which will override", " the two-stage strategy that it uses for reading symbol tables", " from the NAMELIST. 
If module symbol tables are loaded during", " runtime with the \"mod\" command, the same override will occur.", "", " --smp ", " Specify that the system being analyzed is an SMP kernel.", "", " -v", " --version", " Display the version of the crash utility, the version of the", " embedded gdb module, GPL information, and copyright notices.", "", " --cpus number", " Specify the number of cpus in the SMP system being analyzed.", "", " --osrelease dumpfile", " Display the OSRELEASE vmcoreinfo string from a kdump dumpfile", " header.", "", " --hyper", " Force the session to be that of a Xen hypervisor.", "", " --p2m_mfn pfn", " When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile", " is typically analyzed with either the Xen hypervisor or the dom0", " kernel. It is also possible to analyze any of the guest domU", " kernels if the pfn_to_mfn_list_list pfn value of the guest kernel", " is passed on the command line along with its NAMELIST and the ", " dumpfile.", "", " --xen_phys_start physical-address", " Supply the base physical address of the Xen hypervisor's text", " and static data for older xendump dumpfiles that did not pass", " that information in the dumpfile header.", "", " --zero_excluded", " If the makedumpfile(8) facility has filtered a compressed kdump", " dumpfile to exclude various types of non-essential pages, or has", " marked a compressed or ELF kdump dumpfile as incomplete due to", " an ENOSPC or other error during its creation, any attempt to", " read missing pages will fail. With this flag, reads from any", " of those pages will return zero-filled memory.", "", " --no_panic", " Do not attempt to find the task that was running when the kernel", " crashed. 
Set the initial context to that of the \"swapper\" task", " on cpu 0.", "", " --more ", " Use /bin/more as the command output scroller, overriding the", " default of /usr/bin/less and any settings in either ./.crashrc", " or $HOME/.crashrc.", "", " --less ", " Use /usr/bin/less as the command output scroller, overriding any", " settings in either ./.crashrc or $HOME/.crashrc.", "", " --CRASHPAGER", " Use the output paging command defined in the CRASHPAGER shell", " environment variable, overriding any settings in either ./.crashrc ", " or $HOME/.crashrc.", "", " --no_scroll", " Do not pass run-time command output to any scrolling command.", "", " --no_strip", " Do not strip cloned kernel text symbol names.", "", " --no_crashrc", " Do not execute the commands in either $HOME/.crashrc or ./.crashrc.", "", " --mod directory", " When loading the debuginfo data of kernel modules with the \"mod -S\"", " command, search for their object files in directory instead of in ", " the standard location.", "", " --src directory", " Search for the kernel source code in directory instead of in the", " standard location that is compiled into the debuginfo data.", "", " --reloc size", " When analyzing live x86 kernels configured with a CONFIG_PHYSICAL_START ", " value that is larger than its CONFIG_PHYSICAL_ALIGN value, then it will", " be necessary to enter a relocation size equal to the difference between", " the two values.", "", " --hash count", " Set the number of internal hash queue heads used for list gathering", " and verification. The default count is 32768.", "", " --kaslr offset | auto", " If an x86_64 kernel was configured with CONFIG_RANDOMIZE_BASE, the", " offset value is equal to the difference between the symbol values ", " compiled into the vmlinux file and their relocated KASLR value. 
If", " set to auto, the KASLR offset value will be automatically calculated.", "", " --minimal", " Bring up a session that is restricted to the log, dis, rd, sym,", " eval, set and exit commands. This option may provide a way to", " extract some minimal/quick information from a corrupted or truncated", " dumpfile, or in situations where one of the several kernel subsystem ", " initialization routines would abort the crash session.", "", " --kvmhost [32|64]", " When examining an x86 KVM guest dumpfile, this option specifies", " that the KVM host that created the dumpfile was an x86 (32-bit)", " or an x86_64 (64-bit) machine, overriding the automatically", " determined value.", "", " --kvmio ", " override the automatically-calculated KVM guest I/O hole size.", "", " --offline [show|hide]", " Show or hide command output that is associated with offline cpus,", " overriding any settings in either ./.crashrc or $HOME/.crashrc.", "", "FILES:", "", " .crashrc", " Initialization commands. The file can be located in the user's", " HOME directory and/or the current directory. Commands found in", " the .crashrc file in the HOME directory are executed before", " those in the current directory's .crashrc file.", "", "ENVIRONMENT VARIABLES:", "", " EDITOR ", " Command input is read using readline(3). If EDITOR is set to", " emacs or vi then suitable keybindings are used. If EDITOR is", " not set, then vi is used. This can be overridden by \"set vi\" or", " \"set emacs\" commands located in a .crashrc file, or by entering", " \"-e emacs\" on the crash command line.", "", " CRASHPAGER", " If CRASHPAGER is set, its value is used as the name of the program", " to which command output will be sent. 
If not, then command output", " output is sent to \"/usr/bin/less -E -X\" by default.", "", " CRASH_MODULE_PATH", " Specifies an alternative directory tree to search for kernel", " module object files.", "", " CRASH_EXTENSIONS", " Specifies a directory containing extension modules that will be", " loaded automatically if the -x command line option is used.", "", NULL }; void program_usage(int form) { if (form == SHORT_FORM) { fprintf(fp, "\nUsage:\n\n"); fprintf(fp, "%s\n%s\n", program_usage_info[3], program_usage_info[4]); fprintf(fp, "\nEnter \"%s -h\" for details.\n", pc->program_name); clean_exit(1); } else { FILE *scroll; char *scroll_command; char **p; if ((scroll_command = setup_scroll_command()) && (scroll = popen(scroll_command, "w"))) fp = scroll; else scroll = NULL; for (p = program_usage_info; *p; p++) { fprintf(fp, *p, pc->program_name); fprintf(fp, "\n"); } fflush(fp); if (scroll) pclose(scroll); clean_exit(0); } } /* * Get an updated count of commands for subsequent help menu display, * reshuffling the deck if this is the first time or if something's changed. */ void help_init(void) { struct command_table_entry *cp; struct extension_table *ext; for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->ncmds++; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->ncmds++; } } if (!pc->cmdlist) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **) malloc(sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot malloc command list space\n"); } else if (pc->ncmds > pc->cmdlistsz) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **)realloc(pc->cmdlist, sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot realloc command list space\n"); } reshuffle_cmdlist(); } /* * If the command list is modified during runtime, re-shuffle the list * for proper help menu display. 
*/ static void reshuffle_cmdlist(void) { int i, cnt; struct command_table_entry *cp; struct extension_table *ext; for (i = 0; i < pc->cmdlistsz; i++) pc->cmdlist[i] = NULL; for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->cmdlist[cnt++] = cp->name; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->cmdlist[cnt++] = cp->name; } } if (cnt > pc->cmdlistsz) error(FATAL, "help table malfunction!\n"); qsort((void *)pc->cmdlist, (size_t)cnt, sizeof(char *), sort_command_name); } /* * The help list is in alphabetical order, with exception of the "q" command, * which has historically always been the last command in the list. */ static int sort_command_name(const void *name1, const void *name2) { char **s1, **s2; s1 = (char **)name1; s2 = (char **)name2; if (STREQ(*s1, "q")) return 1; return strcmp(*s1, *s2); } /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information. 
 */
void
cmd_help(void)
{
	int c;
	int oflag;	/* set by -o: dump offset/size tables instead of help text */

	oflag = 0;

	/*
	 *  Each internal-table option dumps its table and returns
	 *  immediately; only -o falls through to modify the handling of
	 *  the remaining non-option arguments.
	 */
	while ((c = getopt(argcnt, args, "efNDdmM:ngcaBbHhkKsvVoptTzLxOr")) != EOF) {
		switch(c)
		{
		case 'e':
			dump_extension_table(VERBOSE);
			return;

		case 'f':
			dump_filesys_table(VERBOSE);
			return;

		case 'n':	/* -n and -D are synonyms */
		case 'D':
			dumpfile_memory(DUMPFILE_MEM_DUMP);
			return;

		case 'x':
			dump_text_value_cache(VERBOSE);
			return;

		case 'd':
			dump_dev_table();
			return;

		case 'M':	/* takes a numeric argument (see getopt "M:") */
			dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL));
			return;

		case 'm':
			dump_machdep_table(0);
			return;

		case 'g':
			dump_gdb_data();
			return;

		case 'N':
			dump_net_table();
			return;

		case 'a':
			dump_alias_data();
			return;

		case 'b':
			dump_shared_bufs();
			return;

		case 'B':
			dump_build_data();
			return;

		case 'c':
			dump_numargs_cache();
			return;

		case 'H':	/* uppercase: verbose variant of -h */
			dump_hash_table(VERBOSE);
			return;

		case 'h':
			dump_hash_table(!VERBOSE);
			return;

		case 'k':
			dump_kernel_table(!VERBOSE);
			return;

		case 'K':
			dump_kernel_table(VERBOSE);
			return;

		case 's':
			dump_symbol_table();
			return;

		case 'V':
			dump_vm_table(VERBOSE);
			return;

		case 'v':
			dump_vm_table(!VERBOSE);
			return;

		case 'O':
			dump_offset_table(NULL, TRUE);
			return;

		case 'o':
			/* defer: applies to the argument loop below */
			oflag = TRUE;
			break;

		case 'T':
			dump_task_table(VERBOSE);
			return;

		case 't':
			dump_task_table(!VERBOSE);
			return;

		case 'p':
			dump_program_context();
			return;

		case 'z':
			/* Self-documentation: list the option letters above. */
			fprintf(fp, "help options:\n");
			fprintf(fp, " -a - alias data\n");
			fprintf(fp, " -b - shared buffer data\n");
			fprintf(fp, " -B - build data\n");
			fprintf(fp, " -c - numargs cache\n");
			fprintf(fp, " -d - device table\n");
			fprintf(fp, " -D - dumpfile contents/statistics\n");
			fprintf(fp, " -e - extension table data\n");
			fprintf(fp, " -f - filesys table\n");
			fprintf(fp, " -h - hash_table data\n");
			fprintf(fp, " -H - hash_table data (verbose)\n");
			fprintf(fp, " -k - kernel_table\n");
			fprintf(fp, " -K - kernel_table (verbose)\n");
			fprintf(fp, " -L - LKCD page cache environment\n");
			/*
			 *  NOTE(review): angle-bracketed text appears to have
			 *  been stripped from this file by extraction; this
			 *  line likely read "-M <num> machine specific"
			 *  originally -- verify against upstream help.c.
			 */
			fprintf(fp, " -M machine specific\n");
			fprintf(fp, " -m - machdep_table\n");
			fprintf(fp, " -n - dumpfile contents/statistics\n");
			fprintf(fp, " -o - offset_table and size_table\n");
			fprintf(fp, " -p - program_context\n");
			fprintf(fp, " -r - dump registers from dumpfile header\n");
			fprintf(fp, " -s - symbol table data\n");
			fprintf(fp, " -t - task_table\n");
			fprintf(fp, " -T - task_table plus context_array\n");
			fprintf(fp, " -v - vm_table\n");
			fprintf(fp, " -V - vm_table (verbose)\n");
			fprintf(fp, " -x - text cache\n");
			return;

		case 'L':
			dumpfile_memory(DUMPFILE_ENVIRONMENT);
			return;

		case 'r':
			dump_registers();
			return;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, COMPLETE_HELP);

	/*
	 *  No arguments: show the full offset table (-o) or the top-level
	 *  help menu.
	 */
	if (!args[optind]) {
		if (oflag)
			dump_offset_table(NULL, FALSE);
		else
			display_help_screen("");
		return;
	}

	/*
	 *  One or more arguments: each is either an offset-table filter
	 *  (-o) or a command name to show the help page for.
	 */
	do {
		if (oflag)
			dump_offset_table(args[optind], FALSE);
		else
			cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP);
		optind++;
	} while (args[optind]);
}

/*
 *  "help -r": dump the saved registers from the dumpfile header, choosing
 *  the dumper that matches the dumpfile format.  Fails fatally on live
 *  systems and on formats that carry no register notes.
 */
static void
dump_registers(void)
{
	if (pc->flags2 & QEMU_MEM_DUMP_ELF) {
		dump_registers_for_qemu_mem_dump();
		return;
	} else if (DISKDUMP_DUMPFILE()) {
		dump_registers_for_compressed_kdump();
		return;
	} else if (NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) {
		dump_registers_for_elf_dumpfiles();
		return;
	}

	error(FATAL, "-r option not supported on %s\n",
		ACTIVE() ? "a live system" : "this dumpfile type");
}

/*
 *  Format and display the help menu.
*/ void display_help_screen(char *indent) { int i, j, rows; char **namep; help_init(); fprintf(fp, "\n%s", indent); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%-15s", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) break; } fprintf(fp,"\n%s", indent); } fprintf(fp, "\n%s%s version: %-6s gdb version: %s\n", indent, pc->program_name, pc->program_version, pc->gdb_version); fprintf(fp, "%sFor help on any command above, enter \"help \".\n", indent); fprintf(fp, "%sFor help on input options, enter \"help input\".\n", indent); fprintf(fp, "%sFor help on output options, enter \"help output\".\n", indent); #ifdef NO_LONGER_TRUE fprintf(fp, "%sFor the most recent version: " "http://www.missioncriticallinux.com/download\n\n", indent); #else fprintf(fp, "\n"); #endif } /* * Used for generating HTML pages, dump the commands in the order * they would be seen on the help menu, i.e., from left-to-right, row-by-row. * Line ends are signaled with a "BREAK" string. */ static void display_commands(void) { int i, j, rows; char **namep; help_init(); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%s\n", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) { fprintf(fp, "BREAK\n"); break; } } } } /* * Help data for a command must be formatted using the following template: "command-name", "command description line", "argument-usage line", "description...", "description...", "description...", NULL, * The first line is concatenated with the second line, and will follow the * help command's "NAME" header. * The first and third lines will also be concatenated, and will follow the * help command's "SYNOPSIS" header. If the command has no arguments, enter * a string consisting of a space, i.e., " ". 
* The fourth and subsequent lines will follow the help command's "DESCRIPTION" * header. * * The program name can be referenced by using the %%s format. The final * entry in each command's help data string list must be a NULL. */ char *help_foreach[] = { "foreach", "display command data for multiple tasks in the system", "[[pid | taskp | name | state | [kernel | user]] ...]\n" " command [flag] [argument]", " This command allows for an examination of various kernel data associated", " with any, or all, tasks in the system, without having to set the context", " to each targeted task.\n", " pid perform the command(s) on this PID.", " taskp perform the command(s) on task referenced by this hexadecimal", " task_struct pointer.", " name perform the command(s) on all tasks with this name. If the", " task name can be confused with a foreach command name, then", " precede the name string with a \"\\\". If the name string is", " enclosed within \"'\" characters, then the encompassed string", " must be a POSIX extended regular expression that will be used", " to match task names.", " user perform the command(s) on all user (non-kernel) threads.", " kernel perform the command(s) on all kernel threads.", " active perform the command(s) on the active thread on each CPU.", " state perform the command(s) on all tasks in the specified state, which", " may be one of: RU, IN, UN, ST, ZO, TR, SW, DE, WA or PA.\n", " If none of the task-identifying arguments above are entered, the command", " will be performed on all tasks.\n", " command select one or more of the following commands to be run on the tasks", " selected, or on all tasks:\n", " bt run the \"bt\" command (optional flags: -r -t -l -e -R -f -F", " -o -s -x -d)", " vm run the \"vm\" command (optional flags: -p -v -m -R -d -x)", " task run the \"task\" command (optional flags: -R -d -x)", " files run the \"files\" command (optional flag: -c -R)", " net run the \"net\" command (optional flags: -s -S -R -d -x)", " set run the 
\"set\" command", " ps run the \"ps\" command (optional flags: -G -s -p -c -t -l -a", " -g -r)", " sig run the \"sig\" command (optional flag: -g)", " vtop run the \"vtop\" command (optional flags: -c -u -k)\n", " flag Pass this optional flag to the command selected.", " argument Pass this argument to the command selected.", " ", " A header containing the PID, task address, cpu and command name will be", " pre-pended before the command output for each selected task. Consult the", " help page of each of the command types above for details.", "\nEXAMPLES", " Display the stack traces for all tasks:\n", " %s> foreach bt", " PID: 4752 TASK: c7680000 CPU: 1 COMMAND: \"xterm\"", " #0 [c7681edc] schedule at c01135f6", " (void)", " #1 [c7681f34] schedule_timeout at c01131ff", " (24)", " #2 [c7681f64] do_select at c0132838", " (5, c7681fa4, c7681fa0)", " #3 [c7681fbc] sys_select at c0132dad", " (5, 8070300, 8070380, 0, 0)", " #4 [bffffb0c] system_call at c0109944", " EAX: 0000008e EBX: 00000005 ECX: 08070300 EDX: 08070380 ", " DS: 002b ESI: 00000000 ES: 002b EDI: 00000000 ", " SS: 002b ESP: bffffadc EBP: bffffb0c ", " CS: 0023 EIP: 402259ee ERR: 0000008e EFLAGS: 00000246 ", " ", " PID: 557 TASK: c5600000 CPU: 0 COMMAND: \"nfsd\"", " #0 [c5601f38] schedule at c01135f6", " (void)", " #1 [c5601f90] schedule_timeout at c01131ff", " (c5600000)", " #2 [c5601fb8] svc_recv at c805363a", " (c0096f40, c5602800, 7fffffff, 100, c65c9f1c)", " #3 [c5601fec] (nfsd module) at c806e303", " (c5602800, c5602800, c0096f40, 6c6e0002, 50)", " #4 [c65c9f24] kernel_thread at c010834f", " (0, 0, ext2_file_inode_operations)", " ", " PID: 824 TASK: c7c84000 CPU: 0 COMMAND: \"mingetty\"", " ...\n", " Display the task_struct structure for each \"bash\" command:\n", " %s> foreach bash task", " ...\n", " Display the open files for all tasks:\n", " %s> foreach files", " ...\n", " Display the state of tasks whose name contains a match to \"event.*\":\n", " %s> foreach 'event.*' task -R state", " PID: 99 
TASK: ffff8804750d5500 CPU: 0 COMMAND: \"events/0\"", " state = 1,", " ", " PID: 100 TASK: ffff8804750d4ac0 CPU: 1 COMMAND: \"events/1\"", " state = 1,", " ", " PID: 101 TASK: ffff8804750d4080 CPU: 2 COMMAND: \"events/2\"", " state = 1,", " ...\n", " Display the stack traces for all blocked (TASK_UNINTERRUPTIBLE) tasks:\n", " %s> foreach UN bt", " PID: 428 TASK: ffff880036b6c560 CPU: 1 COMMAND: \"jbd2/dm-1-8\"", " #0 [ffff880035779a70] __schedule at ffffffff815df272", " #1 [ffff880035779b08] schedule at ffffffff815dfacf", " #2 [ffff880035779b18] io_schedule at ffffffff815dfb7f", " #3 [ffff880035779b38] sleep_on_page at ffffffff81119a4e", " #4 [ffff880035779b48] __wait_on_bit at ffffffff815e039f", " #5 [ffff880035779b98] wait_on_page_bit at ffffffff81119bb8", " #6 [ffff880035779be8] filemap_fdatawait_range at ffffffff81119ccc", " #7 [ffff880035779cd8] filemap_fdatawait at ffffffff81119d8b", " #8 [ffff880035779ce8] jbd2_journal_commit_transaction at ffffffff8123a99c", " #9 [ffff880035779e58] kjournald2 at ffffffff8123ee7b", " #10 [ffff880035779ee8] kthread at ffffffff8108fb9c", " #11 [ffff880035779f48] kernel_thread_helper at ffffffff815ebaf4", " ...\n", NULL }; char *help_ascii[] = { "ascii", "translate a hexadecimal string to ASCII", "value ...", " Translates 32-bit or 64-bit hexadecimal values to ASCII. If no argument", " is entered, an ASCII chart is displayed.", "\nEXAMPLES", " Translate the hexadecimal value of 0x62696c2f7273752f to ASCII:", "\n %s> ascii 62696c2f7273752f", " 62696c2f7273752f: /usr/lib", "\n Display an ASCII chart:", "\n %s> ascii", " ", " 0 1 2 3 4 5 6 7", " +-------------------------------", " 0 | NUL DLE SP 0 @ P ' p", " 1 | SOH DC1 ! 
1 A Q a q", " 2 | STX DC2 \" 2 B R b r", " 3 | ETX DC3 # 3 C S c s", " 4 | EOT DC4 $ 4 D T d t", " 5 | ENQ NAK \% 5 E U e u", " 6 | ACK SYN & 6 F V f v", " 7 | BEL ETB ` 7 G W g w", " 8 | BS CAN ( 8 H X h x", " 9 | HT EM ) 9 I Y i y", " A | LF SUB * : J Z j z", " B | VT ESC + ; K [ k {", " C | FF FS , < L \\ l |", " D | CR GS _ = M ] m }", " E | SO RS . > N ^ n ~", " F | SI US / ? O - o DEL", NULL }; char *help_quit[] = { "quit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"exit\" command.", NULL }; char *help_exit[] = { "exit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"q\" command.", NULL }; char *help_help[] = { "help", "get help", "[command | all] [-