Logo Search packages:      
Sourcecode: lcrash version File versions  Download package

kl_kern_x86_64.c

/*
 * $Id: kl_kern_x86_64.c,v 1.1 2004/12/21 23:26:20 tjm Exp $
 *
 * This file is part of libklib.
 * A library which provides access to Linux system kernel dumps.
 *
 * Created by Silicon Graphics, Inc.
 * Contributions by IBM, NEC, and others
 *
 * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
 * Copyright (C) 2001, 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Copyright 2000 Junichi Nomura, NEC Solutions <j-nomura@ce.jp.nec.com>
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version. See the file COPYING for more
 * information.
 *
 * Added support for X86_64 architecture Mar 2004
 *      Prashanth Tamraparni (prasht@in.ibm.com)
 *      Sachin Sant (sachinp@in.ibm.com)
 */

#include <klib.h>

/*
 * kl_kernelstack_x86_64()
 *
 * Return the top address of the kernel stack for 'task',
 * or 0 if the task_struct could not be read.
 */
kaddr_t
kl_kernelstack_x86_64(kaddr_t task)
{
      void *ts_buf;
      kaddr_t stack_top = 0;

      ts_buf = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP);
      if (!ts_buf) {
            return(0);
      }
      kl_get_task_struct(task, 2, ts_buf);
      if (!KL_ERROR) {
            if (LINUX_2_6_X(KL_LINUX_RELEASE)) {
                  /* 2.6: the stack lives with thread_info, which the
                   * task_struct points at. */
                  kaddr_t ti = kl_kaddr(ts_buf, "task_struct",
                        "thread_info");
                  stack_top = ti + KL_KSTACK_SIZE_X86_64;
            } else {
                  /* pre-2.6: the stack shares its pages with the
                   * task_struct itself. */
                  stack_top = task + KL_KSTACK_SIZE_X86_64;
            }
      }
      kl_free_block(ts_buf);
      return(stack_top);
}

/*
 * kl_fix_vaddr_x86_64()
 *
 * If the range [vaddr, vaddr + sz) lies entirely inside one of the
 * per-cpu task/stack snapshots saved in the x86_64 dump header,
 * return the corresponding address inside the in-memory snapshot
 * buffer; otherwise return vaddr unchanged.
 */
kaddr_t
kl_fix_vaddr_x86_64(kaddr_t vaddr, size_t sz)
{
      kl_dump_header_x86_64_t dha;
      kaddr_t addr;
      int i;

      /* No arch-specific dump header available: nothing to remap. */
      if (kl_get_dump_header_x86_64(&dha)){
            return vaddr;
      }

      /*
       * We look thru the saved snapshots of the task structures and if
       * the requested memory from vaddr thru vaddr + sz is within the
       * snapshot then we return the requested address within the snapshot.
       *
       * If this is a request for the saved task struct then vaddr should
       * be page aligned. Otherwise original vaddr is returned even
       * if a part of the range vaddr to vaddr+sz falls within the range
       * of saved task_struct+stack.
       */
      for (i = 0; i < KL_GET_UINT32(&dha.smp_num_cpus); i++) {
            if (dha.smp_regs[i].rip < KL_START_KERNEL_map_X86_64){
                  /* if task is in user space,
                   * no need to look at saved stack */
                  continue; 
            }
            if (LINUX_2_6_X(KL_LINUX_RELEASE)) {
                  /* 2.6: snapshot is anchored at the saved stack pointer. */
                  addr = KL_GET_PTR(&dha.stack_ptr[i]);
            } else if (LINUX_2_4_X(KL_LINUX_RELEASE)) {
                  /* 2.4: snapshot is anchored at the task_struct address. */
                  addr = KL_GET_PTR(&dha.smp_current_task[i]);
                  /* NOTE(review): an exact match on the task_struct address
                   * is deliberately NOT redirected into the snapshot --
                   * presumably the live dump copy is preferred here; confirm
                   * against callers before changing. */
                  if(vaddr == addr) {
                        return vaddr;
                  }
            } else {
                  /* Unknown linux release, don't bother with snapshots */
                  return vaddr;
            }
            /* Redirect only when the whole requested range fits inside
             * this cpu's saved task_struct+stack area. */
            if (vaddr >= addr && vaddr + sz <=  addr + KL_KSTACK_SIZE_X86_64)
                  return (kaddr_t)(dha.stack[i] + (vaddr - addr));
      }
      return vaddr;
}

/*
 * Name: kl_init_virtop_x86_64()
 * Func: initialize virtual to physical address translation
 *       This function must at least initialize high_memory and init_mm.
 *       Returns 0 on success, 1 on any failure.
 */
int
kl_init_virtop_x86_64(void)
{
      syment_t *sym;

      /* high_memory: the virtop machinery is not up yet, so we strip
       * the kernel mapping offset by hand and read from the physical
       * image directly.
       */
      if (!(sym = kl_lkup_symname("high_memory"))) {
            /* XXX set error code */
            return(1);
      }
      KL_HIGH_MEMORY = KL_READ_PTR(sym->s_addr - KL_START_KERNEL_map_X86_64);
      if (KL_ERROR) {
            KL_HIGH_MEMORY = (kaddr_t) -1;
            /* XXX set error code */
            return(1);
      }
      if (!KL_HIGH_MEMORY) {
            KL_HIGH_MEMORY = (kaddr_t) -1;
      }

      /* init_mm: keep its physical address around so later code can
       * call kl_readmem() directly, before kl_virtop() is usable.
       */
      if (!(sym = kl_lkup_symname("init_mm"))) {
            /* XXX set error code */
            return(1);
      }
      KL_INIT_MM = sym->s_addr - KL_START_KERNEL_map_X86_64;

      /* unsigned long num_physpages */
      if (!(sym = kl_lkup_symname("num_physpages"))) {
            /* XXX set error code */
            return(1);
      }
      NUM_PHYSPAGES = KL_READ_PTR(sym->s_addr - KL_START_KERNEL_map_X86_64);
      if (KL_ERROR) {
            /* XXX set error code */
            return(1);
      }

      /* Start address of the kernel page table: prefer mem_map;
       * fall back to pgdat_list for DISCONTIG memory (no 'mem_map'
       * symbol; supported from 2.6 onwards).
       */
      if ((sym = kl_lkup_symname("mem_map"))) {
            /* mem_map_t * mem_map */
            MEM_MAP = KL_READ_PTR(sym->s_addr - KL_START_KERNEL_map_X86_64);
            if (KL_ERROR) {
                  /* XXX set error code */
                  return(1);
            }
      } else {
            if (!(sym = kl_lkup_symname("pgdat_list"))) {
                  /* XXX set error code */
                  return(1);
            }
            MEM_MAP = 0;
            KL_PGDAT_LIST = KL_VREAD_PTR(sym->s_addr);
            if (KL_ERROR) {
                  /* XXX set error code */
                  return(1);
            }
      }

      /* Record the cr4 feature flags (e.g. paging-related settings). */
      if (!(sym = kl_lkup_symname("mmu_cr4_features"))) {
            return(1);
      }
      KL_KERNEL_FLAGS = KL_READ_PTR(sym->s_addr - KL_START_KERNEL_map_X86_64);
      if (KL_ERROR) {
            /* XXX set error code */
            return(1);
      }

      return(0);
}

/*
 * kl_virtop_x86_64()
 *
 *   Translate a virtual address into a physical address. 
 *
 *   vaddr -- virtual address to translate
 *   m     -- optional mm_struct buffer to use for logical->physical
 *            mapping; when NULL, init_mm is read from the dump
 *   paddr -- out: resulting physical address (NULL on failure)
 *
 *   Returns 0 on success, 1 on failure (with KL_ERROR set).
 */
int
kl_virtop_x86_64(kaddr_t vaddr, void *m, kaddr_t *paddr)
{
      int mm_alloced = 0;
      void *mmp = m;

      *paddr = (kaddr_t) NULL;

      kl_reset_error();

      if (!mmp && KL_KADDR_IS_PHYSICAL_X86_64(vaddr)) {

            /* Identity-mapped kernel address: subtract whichever
             * linear-mapping base the address belongs to. */
            if (vaddr >= KL_START_KERNEL_map_X86_64) 
                  *paddr = (unsigned long)(vaddr) - KL_START_KERNEL_map_X86_64; 
            else
                  *paddr = (unsigned long)(vaddr) - KL_PAGE_OFFSET_X86_64;     

      } else if (mmp || KL_INIT_MM) {
            /* Treat address as logical and map to a physical one */
            if (!mmp) {
                  /* No mm supplied: read init_mm from the dump and walk
                   * its page tables instead. */
                  if((mmp = kl_alloc_block(MM_STRUCT_SZ, K_TEMP))) {
                        kl_readmem(KL_INIT_MM, MM_STRUCT_SZ, mmp);
                        if (KL_ERROR) {
                              kl_free_block(mmp);
                              mmp = NULL;
                        } else {
                              mm_alloced++;
                        }
                  }
            }
            /* NOTE(review): if allocation fails here, mmp stays NULL and
             * we fall through with KL_ERROR possibly clear -- the function
             * then returns 0 with *paddr == NULL. Confirm whether callers
             * treat that as failure before changing. */
            if (mmp) {
                  *paddr = KL_MMAP_VIRTOP(vaddr, mmp);
                  if(KL_ERROR){
                        KL_ERROR = KLE_INVALID_MAPPING;
                  }
            }
      } else {
            /* Treat as a physical address but make sure
             * the address does not exceed maximum physical
             * memory.
             */
            if(vaddr > KL_PAGE_OFFSET_X86_64){
                  vaddr -= KL_PAGE_OFFSET_X86_64;
            }
            if ((vaddr >> KL_PAGE_SHIFT) < NUM_PHYSPAGES) {
                  *paddr = vaddr;
            } else {
                  KL_ERROR = KLE_INVALID_PADDR;
            }
      }

      if (mm_alloced) {
            kl_free_block(mmp);
      }     

      if(KL_ERROR){
            *paddr = (kaddr_t) NULL;
            return(1);
      } else {
            return(0);
      }
}

Generated by  Doxygen 1.6.0   Back to index