kdump fails to create a vmcore file on HP Serviceguard for Linux with the message "mount: can't find /mnt in /etc/fstab"

Solution In Progress - Updated -

Issue

  • kdump fails to create a vmcore file on HP Serviceguard for Linux with the message "mount: can't find /mnt in /etc/fstab".
  • When performing SysRq-C, the log messages below are observed on the console when kdump fails to create a vmcore file in the particular directory (/var/crash) specified in /etc/kdump.conf, which resides on a Logical Volume on the internal disk device.
...skipping...

Making device-mapper control node
Scanning logical volumes
  Reading all physical volumes.  This may take a while...
  Found volume group "vg00" using metadata type lvm2
Activating logical volumes
  Not activating vg00/lv_root since it does not pass activation filter.
  Not activating vg00/lv_swap since it does not pass activation filter. 
  0 logical volume(s) in volume group "vg00" now active
Device /dev/mapper/vg00-lv_root not found
Command failed
Device /dev/mapper/vg00-lv_swap not found
Command failed
Free memory/Total memory (free %): 68208 / 113728 ( 59.9747 )
Saving to the local filesystem UUID=eacd3e24-4b55-4c88-a6b4-8291d9f1ec36
Usage: fsck.ext4 [-panyrcdfvtDFV] [-b superblock] [-B blocksize]
        [-I inode_buffer_blocks] [-P process_inode_size]
        [-l|-L bad_blocks_file] [-C fd] [-j external_journal]
        [-E extended-options] device

Emergency help:
 -p                   Automatic repair (no questions)
 -n                   Make no changes to the filesystem
 -y                   Assume "yes" to all questions
 -c                   Check for bad blocks and add them to the badblock list
 -f                   Force checking even if filesystem is marked clean
 -v                   Be verbose
 -b superblock        Use alternative superblock
 -B blocksize         Force blocksize when looking for superblock
 -j external_journal  Set location of the external journal
 -l bad_blocks_file   Add to badblocks list
 -L bad_blocks_file   Set badblocks list
mount: can't find /mnt in /etc/fstab
Restarting system.

...skipping...
  • We have the following configuration for the activation filter in /etc/lvm/lvm.conf.
# cat etc/lvm/lvm.conf | grep -v "#" | grep -v ^$
devices {
    dir = "/dev"
    scan = [ "/dev" ]
    obtain_device_list_from_udev = 1
    preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
    filter = [ "a/.*/" ]
    cache_dir = "/etc/lvm/cache"
    cache_file_prefix = ""
    write_cache_state = 1
    sysfs_scan = 1
    multipath_component_detection = 1
    md_component_detection = 1
    md_chunk_alignment = 1
    data_alignment_detection = 1
    data_alignment = 0
    data_alignment_offset_detection = 1
    ignore_suspended_devices = 0
    disable_after_error_count = 0
    require_restorefile_with_uuid = 1
    pv_min_size = 2048
    issue_discards = 0
}
allocation {

    maximise_cling = 1
    mirror_logs_require_separate_pvs = 0
    thin_pool_metadata_require_separate_pvs = 0
}
log {
    verbose = 0
    silent = 0
    syslog = 1
    overwrite = 0
    level = 0
    indent = 1
    command_names = 0
    prefix = "  "
}
backup {
    backup = 1
    backup_dir = "/etc/lvm/backup"
    archive = 1
    archive_dir = "/etc/lvm/archive"
    retain_min = 10
    retain_days = 30
}
shell {
    history_size = 100
}
global {
    umask = 077
    test = 0
    units = "h"
    si_unit_consistency = 1
    activation = 1
    proc = "/proc"
    locking_type = 1
    wait_for_locks = 1
    fallback_to_clustered_locking = 1
    fallback_to_local_locking = 1
    locking_dir = "/var/lock/lvm"
    prioritise_write_locks = 1
    abort_on_internal_errors = 0
    detect_internal_vg_cache_corruption = 0
    metadata_read_only = 0
    mirror_segtype_default = "mirror"
    use_lvmetad = 0
    thin_check_executable = "/usr/sbin/thin_check"
    thin_check_options = [ "-q" ]
}
activation {
    checks = 0
    udev_sync = 1
    udev_rules = 1
    verify_udev_operations = 0
    retry_deactivation = 1
    missing_stripe_filler = "error"
    use_linear_target = 1
    reserved_stack = 64
    reserved_memory = 8192
    process_priority = -18
    mirror_region_size = 512
    readahead = "auto"
    raid_fault_policy = "warn"
    mirror_log_fault_policy = "allocate"
    mirror_image_fault_policy = "remove"
    snapshot_autoextend_threshold = 100
    snapshot_autoextend_percent = 20
    thin_pool_autoextend_threshold = 100
    thin_pool_autoextend_percent = 20
    use_mlockall = 0
    monitoring = 1
    polling_interval = 15
}
dmeventd {
    mirror_library = "libdevmapper-event-lvm2mirror.so"
    snapshot_library = "libdevmapper-event-lvm2snapshot.so"
    thin_library = "libdevmapper-event-lvm2thin.so"
}
tags { hosttags = 1 }       <---
volume_list = [ "vg00" ]  <---
  • The volume is private (it is not shared between the cluster members), thus we should have the lines marked above in lvm.conf, as described in the HP kbase article referenced here.

Environment

  • RHEL6.4 (2.6.32-358.23.2.el6.x86_64)
  • HP Serviceguard for Linux (serviceguard-A.11.20.22-0.rhel6.x86_64)

Subscriber exclusive content

A Red Hat subscription provides unlimited access to our knowledgebase of over 48,000 articles and solutions.

Current Customers and Partners

Log in for full access

Log In
Close

Welcome! Check out the Getting Started with Red Hat page for quick tours and guides for common tasks.