#!/usr/xpg4/bin/sh
# OPC_WHAT_STRING="@(#)HP OpenView Operations A.07.10 (05/07/02)"
###############################################################################
#
# File:         opcvmsinst
# Description:  OpenVMS INSTALL_SCRIPT script
# Language:     Bourne Shell
# Package:      HP OpenView IT/Operations
#
# (c) Copyright Hewlett-Packard Co. 1993 - 2004
#
###############################################################################

#
#  DESCRIPTION
#    OpenVMS INSTALL_SCRIPT script.
#    This script installs ITO software in Managed Node.
#
#==============================================================================
#    Following is list of actions for installation part:
#
#      0. check, if Management Server is known on Managed Node
#         by asking the Name Server or just /etc/hosts
#         This task is done by the nslookup utility or the nsl program,
#         which must be copied to the Managed Node.
#         NOTE: This check belongs to prolog phase, but it is implemented here
#         because it is slow operation, when nsl program must be copied to 
#         Managed Node.
#
#      1. If Managed Node is cluster client system, 
#         save Managed Node values and substitute them by 
#         Managed Node's cluster server system.
#         This does unix common function: opcuxenvsave 
#         Parameters :
#           SAVE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD OPC_TMP_DIR
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      2. copy ITO software's tape image file on $OPC_NODE, 
#         if ITO software must be installed and 
#         $OPC_ACCESS_METHOD equals default  Management Server distribution.
#         This does unix common function: $OPC_RCOPY_SCRIPT (see opcuxrcopy(1m))
#         Parameters :
#           ITO_tape_image_file
#           $OPC_TAPE_DIR/opc.Z
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#           2 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      3. copy ITO nodeinfo and licensing info file on $OPC_NODE,
#         if Managed Node is not cluster client system
#         This does unix common function: $OPC_RCOPY_SCRIPT (see opcuxrcopy(1m))
#         Parameters :
#           ITO_nodeinfo_file
#           $OPC_TMP_DIR/nodeinfo
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#           2 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      4. remote copy and execute opcrinst script, 
#         which will install ITO software on $OPC_NODE.
#         This does unix common function: opcuxrcpsh (see opcuxrcpsh(1m))
#         Parameters:
#           opcrinst
#           <opcrinst_parameters..>
#         Return code:
#           0 .. OK
#           1 .. ERROR 
#         If return code equals 1, display error and skip this node.
#
#      5. if Managed Node is stand-alone system or cluster server system
#         go to action 8.
#         Otherwise restore Managed Node values.
#         This does unix common function: opcuxenvsave
#         Parameters :
#           RESTORE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD 
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      6. copy ITO nodeinfo and licensing file on $OPC_NODE.
#         This does unix common function: $OPC_RCOPY_SCRIPT (see opcuxrcopy(1m))
#         Parameters :
#           ITO_nodeinfo_file
#           $OPC_TMP_DIR/nodeinfo
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#           2 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      7. remote copy and execute rinst script, which will install ITO software
#         on $OPC_NODE.
#         This does unix common function: opcuxrcpsh (see opcuxrcpsh(1m))
#         Parameters:
#           opcrinst
#           <opcrinst_parameters..>
#         Return code:
#           0 .. OK
#           1 .. ERROR 
#         If return code equals 1, display error and skip this node.
#
#
#      8. Exit
#     
#     
#==============================================================================
#    Following is list of actions for removal part:
#
#      1. remote copy and execute opcrinst script, 
#         which will de-install ITO software on $OPC_NODE.
#         This does unix common function: opcuxrcpsh (see opcuxrcpsh(1m))
#         Parameters:
#           opcrinst
#           <opcrinst_parameters..>
#         Return code:
#           0 .. OK
#           1 .. ERROR 
#         If return code equals 1, display error and skip this node.
#
#      2. if Managed Node is stand-alone system or cluster server system
#         go to action 5.
#         Otherwise save Managed Node values and substitute them by 
#         Managed Node's cluster server system.
#         This does unix common function: opcuxenvsave 
#         Parameters :
#           SAVE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD 
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      3. remote copy and execute opcrinst script, 
#         which will de-install ITO software on Managed Node.
#         This does unix common function: opcuxrcpsh (see opcuxrcpsh(1m))
#         Parameters:
#           opcrinst
#           <opcrinst_parameters..>
#         Return code:
#           0 .. OK
#           1 .. ERROR 
#         If return code equals 1, display error and skip this node.
#
#      4. restore Managed Node values.
#         This does unix common function: opcuxenvsave
#         Parameters :
#           RESTORE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD 
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code not equals 0, display error and skip this node.
#
#      5. Exit
#     
#     
     
#
#  PARAMETERS
#
#
#  EXIT_CODE
#    0 .. OK work completed 
#    1 .. ERROR installation/de-installation failed 
#    2 .. ERROR installation succeeded on Managed Node's cluster server 
#         but failed on Managed Node;
#         de-installation succeeded on Managed Node 
#         but failed on Managed Node's cluster server
#

# defined in every script - the version of this script
# NOTE(review): this 'echo $*' prints the script's raw arguments to stdout on
# every invocation - it looks like leftover debug output; confirm no caller
# parses this stream before removing it.
echo $*
INST_OPCVERS=09.01.180

#-------------------------------------
# Module name definition
#-------------------------------------

# define module name

#VMS Changes Starts
# Source the OpenVMS-specific helper scripts from the agent installation
# directory; presumably these define opcvmsftp and opcvmspwget used
# below - TODO confirm against opcvmsproc/opcvmsprocs.
. "$OPC_AGTINST_DIR"/opcvmsproc
. "$OPC_AGTINST_DIR"/opcvmsprocs
#VMS Changes Ends

#==============================================================================
#
#  DESCRIPTION
#  Generate nodeinfo file
#
#  ENVIRONMENT VARIABLES
#    ND_FILE .. name of generated nodeinfo file
#
#  EXIT_CODE
#    0 .. OK
#    1 .. ERROR  
#
get_nodeinfo()
{
    # Query the server's opcsw utility for the managed node's hexadecimal
    # IP address and derive the nodeinfo file name from it.
    # Sets:    ND_FILE - path of the generated nodeinfo file
    # Returns: 0 on success, 1 when the address cannot be determined
    if HEX_IP_ADDR=`${OPC_BIN_DIR}/opcsw -get_nodeinfo ${MNGD_NODE} 2>/dev/null`
    then
        ND_FILE=${OPCSVR_TMP_DIR}/distrib/${HEX_IP_ADDR}.n
        unset HEX_IP_ADDR
        return 0
    fi

    #   nodeinfo file can't be generated
    #
    oi_error "opc" 214 ${MNGD_NODE}
    return 1
}

#==============================================================================
#
#  DESCRIPTION
#    Create directory on OPC_NODE node
#
#  PARAMETER
#    dir_name .. path-name of directory to be created
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#      2 .. directory already exists
#

mk_node_dir()
{
    # Create directory $1 on the OPC_NODE managed node via the remote-exec
    # helper. Only performed in activation mode (ACTIVATE_ONLY=1);
    # otherwise it is a successful no-op.
    # Returns: 0 created / nothing to do, 1 creation failed,
    #          2 directory already present (any other remote status)
    DIR_NAME=$1

    if [ $ACTIVATE_ONLY = 1 ]
    then
        $OPC_REXEC_SCRIPT opcrmkdir CREATE ${DIR_NAME}
        RMKDIR_RC=$?
        if [ ${RMKDIR_RC} -eq 127 ]
        then
            #   Can't create directory
            #
            oi_error "opc" 061 ${DIR_NAME} ${OPC_NODE}
            return 1
        elif [ ${RMKDIR_RC} -ne 0 ]
        then
            # remote script reports the directory is already there
            return 2
        fi
    fi

    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Create OPC_TMP_DIR on OPC_NODE node
#
#  ENVIRONMENT
#    OPC_TMP_DIR .. path-name of directory to be created
#    OPC_TMP_CREATED .. 1 when temporary directory was created
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#

mk_tmp_dir()
{
    # Create OPC_TMP_DIR on the managed node and remember whether this
    # call actually created it, so rm_tmp_dir only removes what we made.
    # Sets:    OPC_TMP_CREATED=1 when the directory was newly created
    # Returns: 0 on success (including "already exists"), 1 on error
    OPC_TMP_CREATED=0
    mk_node_dir ${OPC_TMP_DIR}
    MKTMP_RC=$?

    if [ ${MKTMP_RC} -eq 1 ]
    then
        return 1
    fi
    if [ ${MKTMP_RC} -eq 0 ]
    then
        OPC_TMP_CREATED=1
    fi

    # status 2 means the directory already existed - that is fine
    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Remove OPC_TMP_DIR on OPC_NODE node
#
#  ENVIRONMENT
#    OPC_TMP_DIR .. path-name of directory to be created
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#

rm_tmp_dir()
{
    # Remove OPC_TMP_DIR on the managed node, but only if a previous
    # mk_tmp_dir call created it (OPC_TMP_CREATED=1) and we are in
    # activation mode - mirroring the conditions under which it was made.
    # Returns: 0 on success or nothing to do, 1 when the remote removal
    #          reports status 127
    if [ ${OPC_TMP_CREATED} = 1 ]
    then
        # forget the "created" mark before attempting the removal
        OPC_TMP_CREATED=0
        if [ $ACTIVATE_ONLY = 1 ]
        then
            $OPC_REXEC_SCRIPT opcrmkdir REMOVE ${OPC_TMP_DIR}
            case $? in
            127)
                return 1
                ;;
            esac
        fi
    fi

    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Re-Start OpC on OPC_NODE node
#
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#

restart_opc()
{
    # Re-start the ITO agent on OPC_NODE through the control agent.
    # All opcragt output is discarded; only the exit status matters.
    # Returns: 0 when opcragt succeeds, 1 otherwise
    if ${OPCSVR_BIN_DIR}/opcragt -start ${OPC_NODE} >/dev/null 2>/dev/null
    then
        return 0
    fi

    return 1
}

#==============================================================================
#
#   MAIN script body
#

# display start module
# Admin-level debugging: each nested OVO script increments DEBUG_CNT so that
# full 'set -x -v' tracing can be tied to the nesting depth (rebalanced in
# the matching block below).
if [ "${OPC_ADEBUG}" = "OPCAYES" ]
then
    set -x -v
    DEBUG_CNT=`expr $DEBUG_CNT + 1`
fi
if [ "${OPC_TRACE}" = "YES" ]
then
    CALLOUT_NAME=$0
    CALLOUT_PARAMS="$*"
    #oi_trace opc 588 "${MODULE_NAME}" ${CALLOUT_NAME} "\"${CALLOUT_PARAMS}\""
    # Record this invocation in the trace file by substituting the module and
    # script placeholders inside TRACE_HEADER (provided by the environment).
    # NOTE(review): CALLOUT_PARAMS is collected but never substituted here -
    # presumably a leftover from the commented-out oi_trace call; confirm.
    echo "TRACE:    ${TRACE_HEADER}" | \
    sed -e "s=MODULxxxxxxx=${MODULE_NAME}=" \
        -e "s=SCRIPTxxxxxx=${CALLOUT_NAME}=" \
    >> ${OPC_TRACE_FILE}
fi
if [ "${OPC_DEBUG}" = "YES" ]
then
    # enable tracing in all functions
    eval `typeset -f | awk '/^function/ { printf "typeset -tf %s\n", $2 }'`
fi
if [ "${OPC_ADEBUG}" = "OPCAYES" ]
then
    # Undo the increment from the top of this preamble; keep 'set -x' active
    # only while an outer nesting level still has debugging enabled.
    if [ ${DEBUG_CNT} -gt 0 ]
    then
        DEBUG_CNT=`expr $DEBUG_CNT - 1`
    fi
    if [ ${DEBUG_CNT} -gt 0 ]
    then
        set -x
    fi
fi

#   input parameters
#
#   Accepted forms:
#     <override> <resolved_ip> <node_kind>
#     <override> <resolved_ip> <node_kind> <cl_node> <cl_ip> \
#                <cl_passwd_req> <cl_passwd>
#
case $# in
3|7)
    ;;
*)
    #   usage error
    #
    oi_error "opc" 564
    EXIT 1
    ;;
esac

#   set OPC_NODE environment variables - specific part 
#   obtained from input parameters
#
OPC_OVERRIDE=$1
OPC_RESOLVED_IP=$2
OPC_NODE_KIND=$3
shift 3

if [ $# -eq 0 ]
then
    #   stand-alone node: no cluster information supplied
    OPC_CL_NODE=""
    OPC_CL_IP_ADDRESS=""
    OPC_CL_PASSWD_REQ=""
    OPC_CL_PASSWD=""
else
    OPC_CL_NODE=$1
    OPC_CL_IP_ADDRESS=$2
    OPC_CL_PASSWD_REQ=$3
    OPC_CL_PASSWD=$4
    shift 4
    if [ "$OPC_CL_PASSWD" = "OPC_UNKNOWN_PASSWORD" ]
    then
        #   the caller passes a placeholder when no password is known
        OPC_CL_PASSWD=""
    fi
fi

if [ "${OPC_STATUS}" = "NOTREADY" ]
then
    #   skip this OPC_NODE
    #
    EXIT 1
fi

export OPC_OVERRIDE 
export OPC_NODE_KIND
export OPC_CL_NODE OPC_CL_PASSWD_REQ OPC_CL_PASSWD

#   set global environment variables
#
# node the nodeinfo lookup (get_nodeinfo) is performed for
MNGD_NODE=${OPC_NODE}

# agent software tape-image file names delivered to the node
SW_FILE=ovo8-v0800-1-i64.tar
#VMSSW_FILE=opc_pkg.zip

SPI_FILE=ovo8-v0800-1-i64.tar

#PERF_FILE=perf_pkg.Z
#VMSPERF_FILE=perf_pkg.zip

# SWNSP_FILES="nsp_pkg gss_pkg ent_pkg"

# nodeinfo file as it will be named on the managed node
NODE_FILE=${OPC_TMP_DIR}/nodeinfo

# setup information file generated locally and transferred to the node
OPCSETUP_INFO=opcsetup.inf

# set to 1 by mk_tmp_dir when the temporary directory had to be created
OPC_TMP_CREATED=0

FTP_CMD5=""

# "VOID" means no separate installation depot/server is used
OPC_DEPOT_NODE="VOID"
if [ "${OPC_METHOD}" = "INSTALL" -a "${ACTIVATE_ONLY}" = "0" ]
then
#==============================================================================
#    INSTALLATION part
#

    if [ "${OPC_DEPOT_NODE}" = "VOID" -o "${OPC_DEPOT_NODE}" = "${OPC_NODE}" ]
    then
      #   No Installation server given
      #

      #   generate and transfer opcsetup.inf file
      #   NOTE(review): the "\r" sequences rely on the XPG4 shell's echo
      #   expanding backslash escapes (the setup file needs CR line ends);
      #   this would break under plain bash echo - confirm the interpreter.
      echo "[Setup Drive]\r"         >${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo ${OPC_DEPOT_NAME}"\r"    >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo "[Management Server]\r"  >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo ${OPC_MGMT_SV}"\r"       >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo "[Account Password]\r"   >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo ${WNT_ACCOUNT_PASSWD}"\r">>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo "[HP ITO Version]\r"     >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo ${OPC_VERSION}"\r"       >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo "[Agent Architecture]\r" >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      echo ${OPC_PLATFORM}"\r"      >>${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}
      FTP_CMD1=""

      #   generate nodeinfo file
      #
      get_nodeinfo
      if [ $? -ne 0 ]
      then
        EXIT 1
      fi

      #VMS Changes Starts

      #   decrypt the remote account password for the opc_rexec calls below
      OPC_RBTTXE=`$OPC_AGTINST_DIR/opccrpt -x qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm -d "$OPC_PASSWD"`
      # quote it in a second step because sed needs a newline at the end
      OPC_RBTTXE=`echo ${OPC_RBTTXE} | sed 's/ /\\\\ /g' | sed 's/"/\\\\"/g'`

      OPC_INST_USER="SYSTEM"
      export OPC_INST_USER
      OPC_TOPC="/tmp"
      export OPC_NODE
      export OPC_INST_USER
      export OPC_PASSWD
      export OPC_TOPC
      rm -f ${OPC_TOPC}/ftp.out

      #   transfer the agent installation kit (binary PCSI images plus the
      #   command and XML files) into SYS$SYSROOT:[SYSUPD] on the VMS node
      FTP_CMD1="cd SYS\$SYSROOT:[SYSUPD]"
      FTP_CMD2="prompt off"
      FTP_CMD3="binary"
      FTP_CMD4="lcd $OPC_INST_DIR/$OPC_PLATFORM/$OPC_VERSION/$OPC_PACKAGE_TYPE"
      FTP_CMD5="mput *.PCSI*"
      FTP_CMD6="ascii"
      FTP_CMD7="mput *.COM*"
      FTP_CMD8="mput *.xml*"
      opcvmsftp "$FTP_CMD1" "$FTP_CMD2" "$FTP_CMD3" "$FTP_CMD4" "$FTP_CMD5" "$FTP_CMD6" "$FTP_CMD7" "$FTP_CMD8"

      #   NOTE(review): all call sites here treat exit status 1 from
      #   opcvmsftp as success - confirm that convention in opcvmsprocs
      if [ $? -ne 1 ]
      then
        #   file couldn't be transferred to the managed node
        #
        oi_verbose opc 377
        echo
        EXIT 1
      fi

      #   remove temporary file opcsetup.inf
      rm -f ${OPCSVR_TMP_DIR}/distrib/${OPCSETUP_INFO}

      if [ "${OPC_DEPOT_NODE}" = "VOID" ]
      then
            echo
            echo
            #   indentation prefix for the following status output
            #   (fixed: a stray '\c' operand was passed to printf here)
            printf "          "
            FTP_CMD1="ascii"
            FTP_CMD2="put $OPC_INST_DIR/$OPC_PLATFORM/$OPC_VERSION/$OPC_PACKAGE_TYPE/install/opcvmsods5disk.com opcvmsods5disk.com"
            opcvmsftp "$FTP_CMD1" "$FTP_CMD2"
            if [ $? -ne 1 ]
            then
               #   file couldn't be transferred to the managed node
               #
               oi_verbose opc 377
               echo
               EXIT 1
            fi

            #   let the node check for a suitable ODS-5 disk, then remove
            #   the helper command procedure again
            $OPC_CMD_DIR/opc_rexec $OPC_NODE $OPC_INST_USER ${OPC_RBTTXE} "@SYS\$LOGIN:opcvmsods5disk.com ${OPC_SW_SIZE}"
            $OPC_CMD_DIR/opc_rexec $OPC_NODE $OPC_INST_USER ${OPC_RBTTXE} "del SYS\$LOGIN:opcvmsods5disk.com;*"

            #   look the target disk up in the '|'-separated node-to-disk map
            DISK_NAME=`grep -i ${OPC_NODE} ${VMS_NODE_TO_DISK} | awk 'BEGIN { FS = "|" } ; { print $2 }'`
            case ${DISK_NAME} in
            "")
                echo "Disk name is null. Please check the file ${VMS_NODE_TO_DISK}"
                EXIT 1
                ;;
            *)
                echo "The chosen disk installed is ${DISK_NAME} for the node ${OPC_NODE}"
                echo "HTTPS Agents will install on the selected diskname= ${DISK_NAME}"
                ;;
            esac

            echo "HTTPS Agents will install on the selected diskname= ${DISK_NAME} ----${OPC_NODE}----"
            echo "Error message may be displayed at the server side even after successful installation/upgradation."
            echo "due to connection or buffering problems."
            echo "Please check the status of installation/upgradation in PCSI database on the OpenVMS managed node."

            #   run the installation command procedure on the node; the log is
            #   tee'd locally and scanned for the success marker afterwards
            ${OPC_CMD_DIR}/opc_rexec ${OPC_NODE} "$OPC_INST_USER" ${OPC_RBTTXE} "@SYS\$UPDATE:OPC_INST.COM  -i -srv ${OPC_MGMT_SV} -cert_srv ${OPC_MGMT_SV} ${DISK_NAME}" | tee ${VMS_INST_LOG} 
            grep "SUCCESSFULLY" ${VMS_INST_LOG} > /dev/null
            if [ $? -ne 0 ]
            then
                echo
                echo "          Error installing product OVOAGENTS on ${OPC_NODE}."
                EXIT 1
            fi
      else
        #   the depot node equals the managed node: register it via
        #   init_install with the local_reinstall keywords
        ${OPCSVR_BIN_DIR}/opcragt -init_install ${OPC_DEPOT_NODE} \
                                                local_reinstall   \
                                                local_reinstall   \
                                                local_reinstall   \
                                                0                 \
                                                ${OPC_NODE}       \
                                 > /dev/null 2> ${VMS_GREP_OUTPUT}
        if [ -s ${VMS_GREP_OUTPUT} ]
        then
          #   There was an error while calling the init_install function on the
          #   Installation server
          #
          oi_verbose opc 365 ${OPC_DEPOT_NODE}
          echo
          EXIT 1
        else
          #   init_install for the installation was ok
          #
          oi_note opc 367
        fi
      fi
    else
      #   Installation server given
      #

      #   generate nodeinfo file
      #
      get_nodeinfo
      if [ $? -ne 0 ]
      then
        EXIT 1
      fi

      #   Initiate transfer and remote execution of installation package
      #   ('_' is the placeholder for empty optional values)
      #
      if [ -z "${WNT_ACCOUNT_PASSWD}" ]
      then
        WNT_ACCOUNT_PASSWD=_
      fi
      if [ "${OPC_DEPOT_NAME}" = "VOID" ]
      then
        OPC_DEPOT_NAME=_
      fi
      ${OPCSVR_BIN_DIR}/opcragt -init_install ${OPC_DEPOT_NODE}     \
                                              ${OPC_PLATFORM}       \
                                              ${OPC_VERSION}        \
                                              ${WNT_ACCOUNT_PASSWD} \
                                              ${OPC_DEPOT_NAME}     \
                                              ${OPC_NODE}           \
                               > /dev/null 2> ${VMS_GREP_OUTPUT}
      #   '$?' below is expanded before the test command runs, so it still
      #   refers to the opcragt exit status
      if [ -s ${VMS_GREP_OUTPUT} -o $? -ne 0 ]
      then
        #   There was an error while calling the init_install function on the
        #   Installation server
        #
        oi_verbose opc 378 ${OPC_NODE} ${OPC_DEPOT_NODE}
        echo
        EXIT 1
      else
        #   init_install for the installation was ok
        #
        oi_note opc 367
      fi
    fi

elif [ "${OPC_METHOD}" = "INSTALL" -a "${ACTIVATE_ONLY}" = "1" ]
then
#==============================================================================
#    ACTIVATION part
#
    #   nothing to transfer for OpenVMS - activation is a no-op here
    METHOD="ACTIVATE"
    EXIT 0

else
#==============================================================================
#    REMOVAL part
#
    METHOD="REMOVE"

    #   decrypt the remote account password for the opc_rexec calls below
    OPC_RBTTXE=`$OPC_AGTINST_DIR/opccrpt -x qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm -d "$OPC_PASSWD"`
    # quote it in a second step because sed needs a newline at the end
    OPC_RBTTXE=`echo ${OPC_RBTTXE} | sed 's/ /\\\\ /g' | sed 's/"/\\\\"/g'`

    OPC_INST_USER="SYSTEM"
    export OPC_INST_USER
    opcvmspwget
    #   NOTE(review): CWD is saved but never restored after 'cd $TMP_DIR' -
    #   presumably leftover; confirm before removing
    CWD=`pwd`
    cd $TMP_DIR

    #   transfer the (de-)installation command procedure to the node
    FTP_CMD1="ascii"
    FTP_CMD2="put $OPC_INST_DIR/$OPC_PLATFORM/$OPC_VERSION/$OPC_PACKAGE_TYPE/OPC_INST.COM OPC_INST.COM"

    opcvmsftp "${FTP_CMD1}" "${FTP_CMD2}" "" "" "" "" "" "" "" ""

    #   NOTE(review): exit status 1 from opcvmsftp means success here
    if [ $? -ne 1 ]
    then
        #   file couldn't be transferred to the managed node
        #
        oi_verbose opc 377
        echo
        EXIT 1
    fi

    echo "Error message may be displayed at the server side even after successful uninstallation."
    echo "due to connection or buffering problems."
    echo "Please check the status of uninstallation in PCSI database on the OpenVMS managed node."

    #   run the de-installation; the log is tee'd locally and scanned for
    #   the success marker afterwards
    ${OPC_CMD_DIR}/opc_rexec ${OPC_NODE} "$OPC_INST_USER" ${OPC_RBTTXE} "@SYS\$LOGIN:OPC_INST.COM -r" | tee ${VMS_UNINST_LOG}
    grep "UNINSTALLED" ${VMS_UNINST_LOG} > /dev/null
    if [ $? -ne 0 ]
    then
        echo "Unable to uninstall the agents on ${OPC_NODE}."
        EXIT 1
    fi

    #   clean the command procedure up on the node again
    ${OPC_CMD_DIR}/opc_rexec ${OPC_NODE} "$OPC_INST_USER" ${OPC_RBTTXE} "del SYS\$LOGIN:OPC_INST.COM;*"

    #   remove package files
    #   execute the init_install call with the parameters for the 
    #   deinstallation of the node
    #
#    ${OPCSVR_BIN_DIR}/opcragt -init_install ${OPC_NODE}     \
#                                            deinstall       \
#                                            deinstall       \
#                                            deinstall       \
#                                            0               \
#                                            ${OPC_NODE}     \
#                             > /dev/null 2> ${VMS_GREP_OUTPUT}
#      if [ -s ${VMS_GREP_OUTPUT} ]
#      then
        #   There was an error while calling the init_install function for 
        #   the deinstallation on the node
        #
        # check if deinstallation failed because of wrong mgmt_sv
#       grep "OpC40-474" ${VMS_GREP_OUTPUT} >/dev/null 2>&1
#       if [ $? -eq 0 ] ; then
#               oi_verbose opc 81 ${OPC_NODE} "\"unknown\"" ${OPC_MGMT_SV}
#       else
#               oi_verbose opc 366 ${OPC_NODE} ${OPC_DEPOT_NODE}
#       fi
#        echo
#        EXIT 1
#      else
        #   init_install for the deinstallation was ok
        #
#        oi_note opc 367
#      fi
fi

EXIT 0

###############################################################################
# end of opcvmsinst
###############################################################################
