#!/usr/xpg4/bin/sh
# OPC_WHAT_STRING="@(#)HP Operations Manager 09.02.030 (09/04/09)"

set +u
################################################################################
#
# File:         opcuxprlg
# Description:  Generic unix family PROLOG_SCRIPT script
# Language:     Bourne Shell
# Package:      HP OpenView IT/Operations
#
# (c) Copyright 1993 - 2009 Hewlett-Packard Development Company, L.P. 
#
################################################################################

#
#  DESCRIPTION
#    Generic unix family PROLOG_SCRIPT script.
#    This script does all the actions, which are prescribed for callout
#    PROLOG_SCRIPT. Additionally, it performs actions to support generic
#    unix cluster.
#    Following is list of all actions (actions for generic unix cluster support
#    are marked with *):
#
#      1. create OPC_TMP_DIR directory on Managed Node, if it doesn't exist.
#         Mark this fact and remove OPC_TMP_DIR from Managed Node when
#         prolog actions are done. The same procedure is performed for
#         Managed Node's cluster server system, if it is involved.
#         
#      2. check OS of Managed Node.
#         This does callout $OPC_OS_CHECK script (see OS_CHECK(1m))
#         Parameters:
#           result_file
#         Return code:
#           0 .. OK
#           1 .. ERROR in OS
#         If return code equals 1, display error and skip this node.
#         Otherwise get the actual OS version from <result_file>,
#         use the actual OS version to determine whether the ITO Managed Node
#         software supports this OS version. If not, skip this node.
#         Remove temporary result_file.
#
#     *3. check Managed Node kind.
#         This does callout $OPC_NODE_CHECK script ( see NODE_CHECK(1m))
#         Parameters:
#           result_file
#         Return code:
#           0 .. OK
#           1 .. ERROR in Node kind
#         If return code equals 1, display error and skip this node.
#         Otherwise get the cluster server system name from <result_file>.
#         If it is empty, Managed Node is stand-alone system; 
#         go to action 7.
#
#     *4. Managed Node is cluster client system. Perform test for cluster
#         server system. Before that, save Managed Node values.
#         This does unix common function: opcuxenvsave (see ??)
#         Parameters :
#           SAVE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD 
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code equals 1, display error and skip this node.
#
#     *5. check password for cluster server system.
#         This is unix common function: opcuxpwget (see opcuxpwget(1m))
#         Parameters :
#         Output environment variables:
#           OPC_PASSWD_REQ OPC_PASSWD
#         Return code:
#           0 .. OK
#           1 .. ERROR in password 
#         If return code equals 1, display error and skip this node.
#
#
#     *6. check OS of Managed Node's cluster server system.
#         This does callout $OPC_CLUSTER_CHECK script ( see CLUSTER_CHECK(1m))
#         Parameters:
#         Return code:
#           0 .. OK
#           2 .. ERROR
#         If the return code is non-zero, display an error and skip this node.
#         NOTE: environment variable OPC_NODE is still set on Managed Node's
#               cluster server system.
#
#      7. check ITO software version.
#         This does callout $OPC_VERSION_CHECK script ( see VERSION_CHECK(1m))
#         Parameters:
#           result_file
#         Return code:
#           0 .. ITO software not yet installed
#           1 .. ITO software not yet installed, but installation not
#                possible
#           2 .. ITO software already installed, but Managed Node belongs
#                to different Management server;
#                the name of actual Management Server system 
#                is delivered in result_file
#           3 .. same version of ITO software already installed
#           4 .. different version of ITO software already installed;
#                the installed version is delivered in result_file
#         
#         If environment variable OPC_METHOD equals "INSTALL" (installation 
#         part),
#         then return code is evaluated. 
#         Following is list of appropriate actions:
#           0 :  continue 
#           1 :  skip this node
#           2 :  if actual Management Server system name equals "unknown"
#                ask user for confirmation of installation;
#                otherwise perform local actions on this node
#           3 :  ask user for confirmation of installation
#           4 :  ask user for confirmation of installation, if
#                installed ITO version is greater than the actual one
#         Following environment variables will be set:
#           OPC_PREV_VERS, OPC_OVERRIDE
#
#         If environment variable OPC_METHOD equals "REMOVE" (removal part),
#         then return code is evaluated. 
#         Following is list of appropriate actions:
#           0 :  skip this node
#           1 :  skip this node
#           2 :  if actual Management Server system name equals "unknown"
#                ask user for confirmation of installation and go to action 10;
#                otherwise skip this node
#           3 :  go to action 10 
#           4 :  go to action 10 
#
#      8. check disks space on Managed node.
#         Required disks space for ITO software depends on whether or not ITO 
#         software is already installed on Managed Node. If ITO software is
#         already installed, only delta between installed and actual
#         ITO software's size is required.
#         Check disks space  does callout $OPC_SPACE_CHECK 
#         script ( see SPACE_CHECK(1m))
#         Parameters:
#           ITO_tape_size
#	    ITO_sw_size
#           result_file
#         Return code:
#           0 .. OK
#           1 .. not enough free disk space
#         If return code equals 1, display error and skip this node.
#         Otherwise get the actual OPC_TAPE_DIR from <result_file>.
#     
#
#     x.  check if Management Server system is reachable from Managed Node
#         When the server has several IP addresses, select (ask the user)
#         the appropriate one and store it into the work file (the install
#         script should pass it to the remote script, which patches the
#         opcinfo file).
#         If test fails skip this node.
#
#     9.  If Managed Node is cluster client system, save cluster server
#         environment variables and restore Managed Node ones.
#         This does unix common function: opcuxenvsave (see oi_??)
#         Parameters :
#           RESTORE
#         Output environment variables:
#           OPC_NODE OPC_PASSWD_REQ OPC_PASSWD 
#           OPC_CL_NODE OPC_CL_PASSWD_REQ OPC_CL_PASSWD
#         Return code:
#           0 .. OK
#           1 .. ERROR  
#         If return code equals 1, display error and skip this node.
#
#     10. Produce <work_line>.
#     
     
#
#  PARAMETERS
#    output_file .. name of temporary file where line of work_file(5) for
#                   actual node will be placed
#
#  EXIT_CODE
#    0 .. OK
#    1 .. ERROR  
#

BIN_DIR=/usr/bin
OS_DIR=/usr
OPT_DIR=/opt
VAR_DIR=/var/opt
ETC_DIR=/etc/opt

OPC_OPT_DIR=${OPT_DIR}/OV

OPCAGT_NLS_DIR=${OPC_OPT_DIR}/locale
OPCSVR_NLS_DIR=${OPC_OPT_DIR}/locale

# localization
LANG_C=C
LANG_SJIS=ja_JP.PCK
LANG_EUC=japanese

# oratab location
ORATAB_LOC=/var/opt/oracle/oratab

#
# nslookup function - Solaris nslookup does not query NIS maps
#
nslookup()
{
  _name=$1
  _result=""
  if [ -x ${OPCSVR_BIN_DIR}/utils/opcsvnsl ]
  then
    _result=`${OPCSVR_BIN_DIR}/utils/opcsvnsl ${_name} 2>&1`
  else
    if [ -x /tmp/opcsvnsl ]
    then
       _result=`/tmp/opcsvnsl ${_name} 2>&1`
    else
      if [ -x ${OPCAGT_INST_DIR}/opcnsl ]
      then
        _result=`${OPCAGT_INST_DIR}/opcnsl ${_name} 2>&1`
      else
        /usr/sbin/nslookup ${_name}
      fi
    fi
  fi
  if [ ! -z "${_result}" ]
  then
    echo ${_result} | grep 'Not found' 1>/dev/null 2>&1
    if [ $? -eq 0 ]
    then
      echo
      echo "*** No address (A) records available for ${_name}"
    else
      _rname=`echo ${_result} | cut -f1 -d:`
      _raddr=`echo ${_result} | cut -f2 -d:`
      echo
      echo "Name:    ${_rname}"
      echo "Address:  ${_raddr}"
      unset _rname
      unset _raddr
    fi
  fi
  unset _name
  unset _result
  return 0
}

GREP=/usr/xpg4/bin/grep
TR=/usr/xpg4/bin/tr

# for agents we may need a different root directory e.g. in a cluster
# environment else it is the root directory
#
ROOT_DIR=${ROOT_DIR:-""}

# defined in every script - the version of this script
INST_OPCVERS=09.02.030

# prerequisites:
#---------------
#
# the paths
#
#   OPT_DIR	e.g. /opt
#   VAR_DIR	e.g. /var/opt
#   ETC_DIR	e.g. /etc/opt
#
# must be defined before using this module
#
# Naming Scheme
# <prog>_<name>_DIR	1 directory e.g. OV_<name>_DIR, OPC_<name>_DIR
# <name>_PATH		several directories delimited by ":"
#
TMP_DIR=${ROOT_DIR}/tmp/opc_tmp

#-------------------------------------
# generic paths
#-------------------------------------
# /opt/OV
OV_OPT_DIR=${OV_OPT_DIR:-${ROOT_DIR}${OPT_DIR}/OV}
OPC_OPT_DIR=${OPC_OPT_DIR:-${OV_OPT_DIR}}
# /var/opt/OV
OV_VAR_DIR=${OV_VAR_DIR:-${ROOT_DIR}${VAR_DIR}/OV}
OV_VAR_SH_DIR=${OV_VAR_SH_DIR:-${OV_VAR_DIR}/share}
OPC_VAR_DIR=${OPC_VAR_DIR:-${OV_VAR_DIR}}
OV_DEFAULTS_PATH=${OV_VAR_DIR}/tmp
# /etc/opt/OV
OV_ETC_DIR=${OV_ETC_DIR:-${ROOT_DIR}${ETC_DIR}/OV}
OV_ETC_SH_DIR=${OV_ETC_SH_DIR:-${OV_ETC_DIR}/share}
OPC_ETC_DIR=${OPC_ETC_DIR:-${OV_ETC_DIR}/share}

OV_NEWCFG_DIR=${OV_NEWCFG_DIR:-${OV_OPT_DIR}/newconfig}
OV_OLDCFG_DIR=${OV_OLDCFG_DIR:-${OV_OPT_DIR}/old}

# WWW
OV_WWW_DIR=$OV_OPT_DIR/www

# /opt/OV
OV_OPT10_DIR=${OV_OPT10_DIR:-${ROOT_DIR}${OPT10_DIR}/OV}
# /var/opt/OV
OV_VAR10_DIR=${OV_VAR10_DIR:-${ROOT_DIR}${VAR10_DIR}/OV}
OV_VAR10_SH_DIR=${OV_VAR10_SH_DIR:-${OV_VAR10_DIR}/share}
# /etc/opt/OV
OV_ETC10_DIR=${OV_ETC10_DIR:-${ROOT_DIR}${ETC10_DIR}/OV}
OV_ETC10_SH_DIR=${OV_ETC10_SH_DIR:-${OV_ETC10_DIR}/share}

#-------------------------------------
# SD paths  
#-------------------------------------
OPC_SD_DIR=${OPC_SD_DIR:-/usr/sbin}
#-------------------------------------
# Sun Cluster paths
#-------------------------------------
OPC_SC_BIN_DIR=${OPC_SC_BIN_DIR:-/opt/SUNWcluster/bin}
OPC_SC_ETC_DIR=${OPC_SC_ETC_DIR:-/etc/opt/SUNWcluster}
OPC_SC_VAR_DIR=${OPC_SC_VAR_DIR:-/var/opt/SUNWcluster}
#-------------------------------------
# VERITAS Cluster Server paths
#-------------------------------------
OPC_VC_BIN_DIR=${OPC_VC_BIN_DIR:-/opt/VRTSvcs/bin}
OPC_VC_LOG_DIR=${OPC_VC_LOG_DIR:-/var/VRTSvcs/log}
OPC_VC_CFG_DIR=${OPC_VC_CFG_DIR:-/etc/VRTSvcs/conf}
#-------------------------------------
# Sol High Availability SW Solutions
#-------------------------------------
OPC_HA_VC_SW=${OPC_HA_VC_SW:-VeritasClusterServer}
OPC_HA_SC_SW=${OPC_HA_SC_SW:-SunCluster}

#-------------------------------------
# OpenView paths
#-------------------------------------
OV_BIN_DIR=${OV_BIN_DIR:-${OV_OPT_DIR}/bin}
OV_TMP_DIR=${OV_TMP_DIR:-${OV_VAR_DIR}/tmp}
OV_LOG_DIR=${OV_LOG_DIR:-${OV_VAR_DIR}/log}
OV_CFG_DIR=${OV_CFG_DIR:-${OV_ETC_DIR}/share/conf}
OV_ALT_CFG_DIR=${OV_ALT_CFG_DIR:-${OV_ETC_DIR}/share/\#conf}

#-------------------------------------
# ITO Server Paths
#-------------------------------------
# /usr/OV/bin/OpC
OPCSVR_BIN_DIR=${OPCSVR_BIN_DIR:-${OPC_OPT_DIR}/bin/OpC}
# /usr/OV/bin/OpC/install
OPCSVR_INST_DIR=${OPCSVR_INST_DIR:-${OPCSVR_BIN_DIR}/install}
# /opt/OV/lib
OPCSVR_LIB_DIR=${OPCSVR_LIB_DIR:-${OPC_OPT_DIR}/lib}
# /opt/OV/nls
OPCSVR_NLS_DIR=${OPCSVR_NLS_DIR:-${OPCSVR_LIB_DIR}/nls}
# /opt/OV/include
OPCSVR_INC_DIR=${OPCSVR_INC_DIR:-${OPC_OPT_DIR}/include}
# /usr/OV/bin/OpC/utils
OPCSVR_UTIL_DIR=${OPCSVR_UTIL_DIR:-${OPCSVR_BIN_DIR}/utils}
# /usr/OV/bin/OpC/agtinstall
OPCSVR_AGTINST_DIR=${OPCSVR_AGTINST_DIR:-${OPCSVR_BIN_DIR}/agtinstall}
# /etc/opt/OV/share/lrf
OPCSVR_OPC_DIR=${OPCSVR_OPC_DIR:-${OPC_OPT_DIR}/OpC}
OPCSVR_NEWCFG_DIR=${OPCSVR_NEWCFG_DIR:-${OV_NEWCFG_DIR}/OpC}
OPCSVR_OLDCFG_DIR=${OPCSVR_OLDCFG_DIR:-${OV_OLDCFG_DIR}/OpC}
OPCSVR_HELP_DIR=${OPCSVR_HELP_DIR:-${OV_VAR_DIR}/share/help}

# /etc/opt/OV directories
OPCSVR_LRF_DIR=${OPCSVR_LRF_DIR:-${OPC_ETC_DIR}/lrf}
OPCSVR_REG_DIR=${OPCSVR_REG_DIR:-${OPC_ETC_DIR}/registration}

# /var/opt/OV/share/databases
OPCSVR_DB_DIR=${OPCSVR_DB_DIR:-${OPC_VAR_DIR}/share/databases/OpC}
# /var/opt/OV/share/databases/OpC/mgd_node/vendor
OPCSVR_VEND_DIR=${OPCSVR_DB_DIR}/mgd_node/vendor
# /var/opt/OV/share/databases/OpC/mgd_node/customer
OPCSVR_CUST_DIR=${OPCSVR_DB_DIR}/mgd_node/customer

# dynamic paths
OPCSVR_TMP_DIR=${OPCSVR_TMP_DIR:-${OPC_VAR_DIR}/share/tmp/OpC}
OPCSVR_LOG_DIR=${OPCSVR_LOG_DIR:-${OPC_VAR_DIR}/log/OpC}
OPCSVR_CFG_DIR=${OPCSVR_CFG_DIR:-${OPC_ETC_DIR}/conf/OpC}
OPCSVR_APPL_DIR=${OPCSVR_APPL_DIR:-${OPC_VAR_DIR}/share/tmp/OpC_appl}
OPCSVR_MAN_DIR=${OPCSVR_MAN_DIR:-${OPC_OPT_DIR}/man}

OPCSVR_INFO_F=${OPCSVR_INFO_F:-${OPCSVR_INST_DIR}/opcsvinfo}
OPCAGT_INFO_F=${OPCAGT_INFO_F:-${OPCSVR_INST_DIR}/opcinfo}

# Config file needed by distributed GUI client
OPC_GUICLT_MSV_F=${OPC_GUICLT_MSV_F:-${OV_CFG_DIR}/opc_guiclt_msv}
OPC_GUICLT_TMP_MSV_F=${OPC_GUICLT_TMP_MSV_F:-/tmp/opc_guiclt_msv}

# DB config file
OVDB_CONFIG_FILE=${OVDB_CONFIG_FILE:-${OV_CFG_DIR}/ovdbconf}

#-------------------------------------
# ITO Agent Paths
#-------------------------------------
# /usr/OV/bin/OpC/s700
OPCAGT_BIN_DIR=${OPCAGT_BIN_DIR:-${OPC_OPT_DIR}/bin/OpC}
OPCAGT_LIB_DIR=${OPCAGT_LIB_DIR:-${OPC_OPT_DIR}/lib}
OPCAGT_NLS_DIR=${OPCAGT_NLS_DIR:-${OPCAGT_LIB_DIR}/nls}
OPCAGT_INC_DIR=${OPCAGT_INC_DIR:-${OPC_OPT_DIR}/include}
# /usr/OV/bin/OpC/s700/utils
OPCAGT_UTIL_DIR=${OPCAGT_UTIL_DIR:-${OPCAGT_BIN_DIR}/utils}
# /usr/OV/bin/OpC/s700/install
OPCAGT_INST_DIR=${OPCAGT_INST_DIR:-${OPCAGT_BIN_DIR}/install}

# /var/opt/OV/bin/OpC
OPCAGT_ACTMONCMD_DIR=${OPCAGT_ACTMONCMD_DIR:-${OPC_VAR_DIR}/bin/OpC}

# /var/opt/OV/bin/OpC/monitor
OPCAGT_MON_DIR=${OPCAGT_MON_DIR:-${OPC_VAR_DIR}/bin/OpC/monitor}
# /opt/OV/bin/OpC/actions
OPCAGT_ACT_DIR=${OPCAGT_ACT_DIR:-${OPC_VAR_DIR}/bin/OpC/actions}
# /var/opt/OV/bin/OpC/cmds
OPCAGT_CMD_DIR=${OPCAGT_CMD_DIR:-${OPC_VAR_DIR}/bin/OpC/cmds}

# dynamic paths
OPCAGT_LOG_DIR=${OPCAGT_LOG_DIR:-${OPC_VAR_DIR}/log/OpC}
OPCAGT_CFG_DIR=${OPCAGT_CFG_DIR:-${OPC_VAR_DIR}/conf/OpC}
OPCAGT_TMP_DIR=${OPCAGT_TMP_DIR:-${OPC_VAR_DIR}/tmp/OpC}
OPCAGT_VARBIN_DIR=${OPCAGT_TMP_DIR:-${OPC_VAR_DIR}/bin/OpC}
OPCAGT_TMPBIN_DIR=${OPCAGT_TMPBIN_DIR:-${OPCAGT_TMP_DIR}/bin}
OPCAGT_TMPCFG_DIR=${OPCAGT_TMPCFG_DIR:-${OPCAGT_TMP_DIR}/conf}
OPCAGT_NEWCFG_DIR=${OPCAGT_NEWCFG_DIR:-${OV_NEWCFG_DIR}/OpC}
OPCAGT_OLDCFG_DIR=${OPCAGT_OLDCFG_DIR:-${OV_OLDCFG_DIR}/OpC}

# info files
OPCAGT_NDINFO_F=${OPCAGT_NDINFO_F:-${OPCAGT_CFG_DIR}/nodeinfo}
OPCAGT_INFO_F=${OPCAGT_INFO_F:-${OPCAGT_INST_DIR}/opcinfo}
OPCAGT_NETLS_F=${OPCAGT_NETLS_F:-${OPCAGT_CFG_DIR}/opcnetls}
OPCAGT_MGRCONF_F=${OPCAGT_MGRCONF_F:-${OPCAGT_CFG_DIR}/mgrconf}
OPCAGT_UPD_F=${OPCAGT_UPD_F:-${OPCAGT_TMP_DIR}/update}
OPCAGT_CLIENT_F=${OPCAGT_CLIENT_F:-${OPCAGT_INST_DIR}/cfg.clients}
OPCAGT_MGMTSV_F=${OPCAGT_MGMTSV_F:-${TMP_DIR}/mgmt_sv.dat}
#
# commands
#
OPCMON=${OPCAGT_BIN_DIR}/opcmon
OPCMSG=${OPCAGT_BIN_DIR}/opcmsg

VUE_DIR=/etc/vue
VUE_USR_DIR=/usr/vue
VUE_ETC_DIR=/etc/vue
VUE_VAR_DIR=/var/vue
CDE_USR_DIR=/usr/dt
CDE_ETC_DIR=/etc/dt
CDE_VAR_DIR=/var/dt

#-------------------------------------
# paths
#-------------------------------------
SYSTEM_PATH=${SYSTEM_PATH:-"/usr/xpg4/bin:/bin:/usr/bin:/sbin:/usr/sbin:/etc:/usr/etc:/usr/ucb"}
OV_PATH=${OV_BIN_DIR}
OPCSVR_PATH=${OPCSVR_BIN_DIR}:${OPCSVR_AGTINST_DIR}:${OPCSVR_INST_DIR}
OPCAGT_PATH=${OPCAGT_BIN_DIR}:${OPCAGT_INST_DIR}:${OPCAGT_MON_DIR}:${OPCAGT_ACT_DIR}:${OPCAGT_CMD_DIR}

# program name
APPLNAME=ITO
LONG_APPLNAME="HP Operations Manager for Sun Solaris 09.02.030"

# common umask value
umask 022

# location of the oratab file

# shared library suffix
SHLIBSUFFIX=so

#-------------------------------------
# Module name definition
#-------------------------------------

# define module name
    MODULE_NAME=PROLOG_SCRIPT

#VMS Changes Starts
. "$OPC_AGTINST_DIR"/opcvmsproc
. "$OPC_AGTINST_DIR"/opcvmsprocs
#VMS Changes ends

#==============================================================================
#
#  Produce work_file(5) line for actual $OPC_NODE.
#
produce_work()
{

#VMS Changes Starts
          OPC_CL_PASSWD_REQ=""
          OPC_CL_PASSWD=""
          OPC_TAPE_DIR="/"
#VMS Changes ends

    if [ "${OPC_STATUS}" = "NOTREADY" -o "${OPC_STATUS}" = "INSTALLED" ]
    then
	#   skip this OPC_NODE
	#
	# skipping warning issued in opc_inst.sh
	echo "${OPC_NODE} ${OPC_STATUS}" > $OPC_OUTPUT_FILE
    else
	#   produce real work_file(5) line for this node
	#

	if [ -z "${OPC_PASSWD}" ]
	then 
	    #	zero length password reset to default value
	    #
	    OPC_PASSWD=OPC_UNKNOWN_PASSWORD
	fi

	if [ -z "${OPC_CL_NODE}" ]
	then
	    #	cluster server system is unknown
	    #
	    OPC_CL_PASSWD_REQ=""
	    OPC_CL_PASSWD=""
	else
	    if [ -z "${OPC_CL_PASSWD}" ]
	    then
		#	zero length password reset to default value
		#
		OPC_CL_PASSWD=OPC_UNKNOWN_PASSWORD
	    fi
	fi

	WORK_LINE="${OPC_NODE}"
	WORK_LINE="${WORK_LINE} ${OPC_STATUS}"
	WORK_LINE="${WORK_LINE} ${OPC_PASSWD_REQ}"
	WORK_LINE="${WORK_LINE} ${OPC_PASSWD}"
	WORK_LINE="${WORK_LINE} ${OPC_VERSION}"
	WORK_LINE="${WORK_LINE} ${OPC_TAPE_DIR}"
	WORK_LINE="${WORK_LINE} ${KERBEROS_BYPASS}"
	WORK_LINE="${WORK_LINE} ${ACTIVATE_ONLY}"
	WORK_LINE="${WORK_LINE} ${OPC_OVERRIDE}"
	#new one
	if [ -z "${OPC_RESOLVED_IP}" ]
	then
	    OPC_RESOLVED_IP="VOID"
	fi
	WORK_LINE="${WORK_LINE} ${OPC_RESOLVED_IP}"
	
	#new new one
	if [ -z "${OPC_OS_VERSION}" ]
	then
	    OPC_OS_VERSION="VOID"
	fi

#VMS Changes Starts
#	WORK_LINE="${WORK_LINE} ${OPC_OS_VERSION}"
#VMS Changes Ends
	
	WORK_LINE="${WORK_LINE} ${OPC_NODE_KIND}"
	WORK_LINE="${WORK_LINE} ${OPC_CL_NODE}"
	WORK_LINE="${WORK_LINE} ${OPC_CL_IP_ADDRESS}"
	WORK_LINE="${WORK_LINE} ${OPC_CL_PASSWD_REQ}"
	WORK_LINE="${WORK_LINE} ${OPC_CL_PASSWD}"

	echo "${WORK_LINE}" > $OPC_OUTPUT_FILE
	unset WORK_LINE
    fi
    
    # remove OPC_TMP_DIR on Managed Node
    #
    if [ "${OPC_NODE_KIND}" = "CLIENT" ]
    then
	# remove OPC_TMP_DIR on Managed Node's cluster server
	#
	opcuxenvsave SAVE
    
    fi
    rm_tmp_dir

}

#==============================================================================
#
#  DESCRIPTION
#   ask, if ITO maintenance should be continued on cluster server
#
#  ENVIRONMENT
#    OPC_CL_NODE .. the name of Managed Node's cluster server system
#
#
#  EXIT_CODE
#    0 .. OK continue with installation
#    1 .. skip this node  
#
ask_for_cluster()
{

    oi_prompt_and_ask "opc" 143 "${OPC_CL_NODE}" "" 097 "${OPC_CL_NODE}" 
    if [ "${OPC_ANSWER}" = "${OPC_OIANSWER_DEF}" ]
    then
	OPC_ANSWER=${OPC_YES_LIT}
    fi
    case "${OPC_ANSWER}" in
	[${OPC_YES_LIT}]*)
	    #	continue with installation
	    #
	    oi_verbose "opc" 125 "${OPC_CL_NODE}"
	    EXIT_VAL=0
	;;
	*)
	    EXIT_VAL=1
	;;
    esac

    unset CLUST_SV

    if [ "${EXIT_VAL}" = "0" ]
    then
	oi_prompt_and_ask opc 536 "" "" 535 $OPC_CL_NODE $OPC_CL_NODE
	if [ "$OPC_ANSWER" = $OPC_OIANSWER_DEF ]
	then
	    OPC_CL_IP_ADDRESS=$OPC_CL_NODE
	else
	    OPC_CL_IP_ADDRESS=$OPC_ANSWER
	fi
    fi	    
    
    return $EXIT_VAL
}

#==============================================================================
#
#  DESCRIPTION
#    Try to find out correct password for Managed Node's cluster server.
#    First inspect password_file(5) -> may be cluster server system is
#    also Managed Node proccessed in this run.
#    Otherwise try with Managed Node's password.
#
#  ENVIRONMENT
#    OPC_CL_NODE   .. the name of Managed Node's cluster server system
#    OPC_CL_PASSWD .. Managed Node's cluster server password
#
#
#  EXIT_CODE
#    0 .. OK 
#
get_pwd_cluster()
{
    OPC_CL_PASSWD=""
    PWD_ND=`echo ${OPC_CL_NODE} | sed -e 's/\..*//g'`
    eval `grep "^${PWD_ND}[ |\.]" ${PASSWORD_FILE} | \
	  awk '{ print "PWD_RQ=" $3
		 print "PWD_ST=" $4 }'`
    
    if [ "${PWD_RQ}" = "0" ]
    then
	# no need for password
	#
    	unset PWD_ND PWD_RQ PWD_ST
	return 0
    fi
    if [ "${PWD_RQ}" = "1" ]
    then
	OPC_CL_PASSWD="${PWD_ST}"
    fi

    if [ -z "${OPC_CL_PASSWD}" -a ${OPC_PASSWD_REQ} -eq 1 ]
    then
	# try first with Managed Node's password
	#
    	OPC_CL_PASSWD="${OPC_PASSWD}"
    fi
    unset PWD_ND PWD_RQ PWD_ST
    return 0
}

#==============================================================================
#
#  DESCRIPTION
#   add OPC_NODE to the list of all nodes on which the ITO software package
#   will be installed during this run of the installation script.
#   The structure of one entry is as follows:
#   <node_name> <platform_name> <opc_version> <opc_sw_disk_space>
#
#  EXIT_CODE
#    0 .. OK
#
add_previous_node()
{
    echo "${OPC_NODE} ${OPC_PLATFORM} ${OPC_VERSION} ${SW_SIZE}" >> \
	 ${PKG_CACHE_FILE}
    return 0
}

#==============================================================================
#
#  DESCRIPTION
#   check ITO software version installed on Managed Node.
#
#  ENVIRONMENT
#    OPC_OVERRIDE .. is set to 1 when ITO software is already installed on
#                the Managed Node and must be re-installed again,
#                or when ITO will be installed for the first time
#    OPC_PREV_VERS .. version of ITO software already installed on Managed Node
#
#  EXIT_CODE
#    0 .. OK
#    1 .. ERROR  
#
chk_opc_version()
{
    REC_VALUE=$1
    REC_FILE=$2
    eval `awk  'NR == 1 { printf "REC_STRING=\"%s\"\n", $0 
                          printf "REC_ALL_IP=\"%s\"\n", $0 }
		NR == 2 { printf "ACT_OPC_VER=\"%s\"\n", $0 }
		NR == 3 { printf "REC_RES_IP=\"%s\"\n", $0 }' \
	 ${REC_FILE}`

    OPC_RESOLVED_IP=${REC_RES_IP}

    EXIT_VAL=0

    if [ "${OPC_METHOD}" = "INSTALL" ]
    then
	#   installation part
	#
	chk_previous_nodes
	if [ $? -eq 1 ]
	then
	    #   ITO will be installed in this run of inst.sh for
	    #   one of previous entry in node_info file
	    #   (install only local ITO bits).
	    #
	    oi_verbose "opc" 625 "${OPC_NEW_VERSION}" "${OPC_NODE}" ITO
	    OPC_OVERRIDE=0; export OPC_OVERRIDE
	    if [ "${OPC_VERSION}" != "${OPC_NEW_VERSION}" ]
	    then 
 		oi_verbose "opc" 627 "${OPC_VERSION}" "${OPC_NEW_VERSION}" ITO
		OPC_VERSION=${OPC_NEW_VERSION}; export OPC_VERSION
	    fi
	    EXIT_VAL=0
	else
	    case ${REC_VALUE} in
	    0)
		#   ITO software not yet installed
		#   Check the results of nslookup and ping on Managed Node
		#
		get_ip_address ${REC_ALL_IP}
		if [ $? -ne 0 ]
		then
		    EXIT_VAL=1
		else
		    OPC_OVERRIDE=1; export OPC_OVERRIDE
		    EXIT_VAL=0
		fi
		;;
	    1)
		#   installation of ITO software not possible
		#
		EXIT_VAL=1
		;;
	    2)
		#   ITO software already installed, but Managed Node belongs
		#   to different Management Server
		#
		ACT_MGMT_SERVER=${REC_STRING}

		if [ "${ACT_MGMT_SERVER}" = "unknown" ]
		then
		    #	unknown Management Server indicate possibility of
		    #	partial installation of ITO software on Managed Node
		    #	ask user to confirm installation
		    #
		    oi_warning "opc" 241 ${OPC_NODE}
		    oi_prompt_and_ask "opc" 694 "${OPC_NODE}" "" 692 "${OPC_NODE}" 
		    case "${OPC_ANSWER}" in
			[${OPC_YES_LIT}]*)
			#	continue with installation
			#
			    OPC_OVERRIDE=1; export OPC_OVERRIDE
			    EXIT_VAL=0
			;;
			*)
			    oi_verbose "opc" 179 ${OPC_NODE}
			    EXIT_VAL=0
			;;
		    esac
		else
		    #	inform user
		    #
		    oi_error "opc" 081 ${OPC_NODE} ${ACT_MGMT_SERVER} \
		    ${OPC_MGMT_SV}
		    EXIT_VAL=1
		fi

		unset ACT_MGMT_SERVER
		;;
	    3|4)
		if [ "${REC_VALUE}" = 3 ]
		then
		    #   ITO software with same version already installed
		    #
		    oi_verbose "opc" 066 ${OPC_VERSION} ${OPC_NODE}
                    OPC_PREV_VERS=${OPC_VERSION}; export OPC_PREV_VERS
		else
		    #   ITO software with different version already installed
		    #
		    oi_verbose "opc" 067 ${ACT_OPC_VER} ${OPC_NODE} ${OPC_VERSION}
		    OPC_PREV_VERS=${ACT_OPC_VER}; export OPC_PREV_VERS
		    unset ACT_OPC_VER
		fi

                CUR_CHA=`echo $OPC_PREV_VERS | cut -d "." -f1`
                OPC_CHA=`echo $OPC_VERSION | cut -d "." -f1`
                CUR_VER=`echo $OPC_PREV_VERS | cut -d "." -f2-3`
                OPC_VER=`echo $OPC_VERSION | cut -d "." -f2-3`
                export CUR_VER OPC_VER
                if [ `expr ${CUR_VER} \< ${OPC_VER}` = 1 -o \
                     "${CUR_CHA}" != "${OPC_CHA}" ]
                then
                    #we will force installation on such node
                    #bug fix NSMbb09599
                    FRC_FLAG=YES
                else
                    FRC_FLAG="${OPC_FORCE_UPD}"
                fi

		case "${FRC_FLAG}" in
		    INTERACTIVE)
			#	ask user to confirm installation
			#
			    oi_prompt_and_ask "opc" 136 "${OPC_NODE}" "" 043
			    case "${OPC_ANSWER}" in
			    	[${OPC_YES_LIT}]*)
			    	#	continue with installation
			    	#
				oi_verbose "opc" 178 ${OPC_NODE} ${OPC_VERSION}
				EXIT_VAL=0
				OPC_OVERRIDE=1; export OPC_OVERRIDE
			    ;;
			    *)
				oi_verbose "opc" 179 ${OPC_NODE}
				EXIT_VAL=0
				OPC_OVERRIDE=0; export OPC_OVERRIDE
			    ;;
			esac
		    ;;
		    YES)
			#   force update
			#
			oi_verbose "opc" 178 ${OPC_NODE} ${OPC_VERSION}
			EXIT_VAL=0
			OPC_OVERRIDE=1; export OPC_OVERRIDE
		    ;;
		    *)
	                #   do not update
			#
			oi_verbose "opc" 179 ${OPC_NODE}
			EXIT_VAL=0
			OPC_OVERRIDE=0; export OPC_OVERRIDE
                    ;;
		esac
		;;

	    *)
		#   unexpected value
		#
		oi_error "opc" 046 "$OPC_VERSION_CHECK" ${REC_VALUE} ${OPC_NODE}
		EXIT_VAL=1
		;;
	    esac
	fi
    fi

    if [ "${OPC_METHOD}" = "REMOVE" ]
    then
	#   de-installation part
	#
	case ${REC_VALUE} in
	    1)
		#   ITO software not yet installed
		#
		oi_warning "opc" 113 ${OPC_NODE}
		EXIT_VAL=1
		;;
	    2)
		#   ITO software already installed, but Managed Node belongs
		#   to different Management Server
		#
		ACT_MGMT_SERVER=${REC_STRING}

		if [ "${ACT_MGMT_SERVER}" = "unknown" ]
		then
		    #	unknown Management Server indicate possibility of
		    #	partial installation of ITO software on Managed Node
		    #	ask user to confirm installation
		    #
		    oi_warning "opc" 241 ${OPC_NODE}
		    oi_prompt_and_ask "opc" 695 "${OPC_NODE}" "" 693 "${OPC_NODE}" 
		    case "${OPC_ANSWER}" in
			[${OPC_YES_LIT}]*)
			#	continue with installation
			#
			    EXIT_VAL=0
			;;
			*)  oi_verbose "opc" 696 ${OPC_NODE}
			    EXIT_VAL=1
			;;
		    esac
		else
		    #	inform user
		    #
		    oi_error "opc" 081 ${OPC_NODE} ${ACT_MGMT_SERVER} \
		    ${OPC_MGMT_SV}
		    EXIT_VAL=1
		fi

		unset ACT_MGMT_SERVER
		;;
	    0|3|4)
		EXIT_VAL=0
		;;

	    *)
		#   unexpected value
		#
		oi_error "opc" 046 "$OPC_VERSION_CHECK" ${REC_VALUE} ${OPC_NODE}
		EXIT_VAL=1
		;;
	esac
    fi

    unset REC_VALUE REC_STRING ACT_OPC_VER
    unset REC_FILE REC_ALL_IP REC_RES_IP

    if [ ${EXIT_VAL} -eq 0 ]
    then 
	unset EXIT_VAL
	return 0
    else
	unset EXIT_VAL
	return 1
    fi

}

#==============================================================================
#
#  DESCRIPTION
#   compute required disk space on Managed Node for actual situation.
#   Free disk size if function of actual ITO version , ITO version of 
#   software installed on Managed Node and size of ITO software for 
#   other platforms to be installed on this mixed cluster system.
#   
#
#  ENVIRONMENT
#    TAPE_SIZE .. size of actual ITO software tape image
#    SW_SIZE .. required free disk size for ITO software
#
#  EXIT_CODE
#    0 .. OK
#    1 .. ERROR  
#
get_sw_size()
{
  
    #	inform user
    #
    oi_verbose "opc" 048 ${OPC_PLATFORM} ${OPC_NODE}

    #	get ITO tape image size from required.dat file in Kilo Bytes
    #
    PROD_SIZE=`awk "/^${OPC_VERSION}/ {print \\$2}" ${REQ_FILE}`

    if [ -n "${OPC_PREV_VERS}" ]
    then
    	# get required disk space for currently installed ITO version
    	# in Kilo Bytes
	#
    	OPC_PREV_SIZE=`awk "/^${OPC_PREV_VERS}/ {print \\$2}" ${REQ_FILE}`
	if [ -z "${OPC_PREV_SIZE}" ]
	then
	    # 	previous ITO version is no longer installed on 
	    #	the Management Server; maybe corresponding line is commented
	    #   in require.dat
	    #

	    OPC_PREV_SIZE=`awk '{ if ( $1 == "#@" && $2 == vers ) {
				    print $3
				    exit 0
				  }
				}' vers=${OPC_PREV_VERS} ${REQ_FILE}`
	    if [ -z "${OPC_PREV_SIZE}" ]
	    then
		#    zero is assumed
		#
		OPC_PREV_SIZE=0
	    fi
	fi
        THIS_SW_SIZE=`expr ${PROD_SIZE} - ${OPC_PREV_SIZE}`
	if [ ${THIS_SW_SIZE} -le 0 ]
	then 
	    # 	required space now smaller or equal - no problem
	    #
	    THIS_SW_SIZE=0
	fi
    else
    	#   this is new installation
	#
	THIS_SW_SIZE=${PROD_SIZE}
    fi
    SW_SIZE=${THIS_SW_SIZE}

    #   Fix for NSMbb17004: The script previously calculated
    #   TAPE_SIZE=`expr ${PROD_SIZE} - 2000`
    #   This sometimes created problems for subagent installation.
    #
    TAPE_SIZE=${PROD_SIZE}

    unset PROD_SIZE OPC_PREV_SIZE THIS_SW_SIZE

    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Create OPC_TMP_DIR on OPC_NODE node
#
#  ENVIRONMENT
#    OPC_TMP_DIR .. path-name of directory to be created
#    OPC_TMP_CREATED .. 1 when temporary directory was created
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#

mk_tmp_dir()
{
    OPC_TMP_CREATED=0
#VMS Changes Starts
    ACTIVATE_ONLY=0
#VMS Changes Ends 
    if [ $ACTIVATE_ONLY != 1 ]
    then
#VMS Changes Starts
	#CHK_CMD="test -d ${OPC_TMP_DIR} 2>/dev/null ; echo ${OIRMSG_AARGH}E \$?"
	#${OPC_REXEC_SCRIPT} ${CHK_CMD}
        FTP_CMD1="cd ${OPC_TMP_DIR}"
        opcvmsftp "$FTP_CMD1"
	if [ $? -ne 1 ]
	then
	    # temporary directory must be created
	    #
	    # MK_CMD="mkdir -p ${OPC_TMP_DIR}; echo ${OIRMSG_AARGH}E \$?"
	    # ${OPC_REXEC_SCRIPT} ${MK_CMD}
            FTP_CMD1="mkdir ${OPC_TMP_DIR}"
            opcvmsftp "$FTP_CMD1"
	    if [ $? -ne 1 ]
	    then
		#   Can't create directory
		#
		oi_error "opc" 061 ${OPC_TMP_DIR} ${OPC_NODE}
		return 1
	    fi
#VMS Chnages ends
	    unset MK_CMD
	    OPC_TMP_CREATED=1
	fi
	unset CHK_CMD
    fi
    
    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Remove OPC_TMP_DIR on the OPC_NODE node, but only when it was created
#    earlier by mk_tmp_dir (OPC_TMP_CREATED = 1).
#
#  ENVIRONMENT
#    OPC_TMP_DIR     .. path-name of directory to be removed
#    OPC_TMP_CREATED .. reset to 0 once removal has been attempted
#
#  EXIT CODE
#      0 .. task successfully done
#      1 .. error
#

rm_tmp_dir()
{
    case "${OPC_TMP_CREATED}" in
    1)
        # temporary directory was created by this script -- remove it again
        #
        OPC_TMP_CREATED=0
#VMS Changes Starts
        # NOTE(review): opcvmsftp appears to report success with status 1 --
        # confirm against the opcvmsftp implementation.
        FTP_CMD1="rmdir ${OPC_TMP_DIR}"
        opcvmsftp "$FTP_CMD1"
#VMS Changes Ends
        if [ $? -ne 1 ]
        then
            # Can't remove directory
            #
            return 1
        fi
        unset RM_CMD
        ;;
    esac

    return 0
}

#==============================================================================
#
#  DESCRIPTION
#    Inspect the list of all possible management server IP addresses and,
#    in case more than one is available, display them and ask the user
#    to select the correct one.  The list of IP addresses has the form:
#       <IP_ADDRESS>:{0|1}..
#    The selection is then published in OPC_RESOLVED_IP; "VOID" is the
#    default string (no explicit selection required).
#
#  PARAMETERS
#    $* .. list of "<ip>:<status>" items to inspect
#
#  ENVIRONMENT
#    OPC_RESOLVED_IP .. (output) selected IP address, or "VOID"
#
#  EXIT CODE
#      0 .. ok
#      1 .. error
#

get_ip_address()
{
    SELECTED_IP="VOID"
    RET_VAL=0
    LIST_OF_IPS="$*"

    # The local host needs no connectivity check at all.
    if opcux_localhost "${OPC_NODE}"
    then
	OPC_RESOLVED_IP="${SELECTED_IP}"
	return ${RET_VAL}
    fi

    # verifying connectivity from managed node to server
    #
    # Each input item is "<ip>:<status>".  Status 0 or 2 is displayed as
    # "OK", anything else as "FAIL"; status 0 or 1 counts as a selectable
    # candidate.  NOTE(review): confirm this status encoding against the
    # producer of the list -- it is not defined in this file.
    LIST_TO_DISPLAY=`echo "$LIST_OF_IPS" | \
		      awk 'BEGIN { ips=0 }
		           { for ( i = 1; i <= NF; i++ ) {
			       split($i,ipa,":");
			       if ( ipa[2] == 0 || ipa[2] == 2 ) status="OK"
			       else status="FAIL";
			       printf "%s\t\t%s\n", ipa[1], status;
		               if ( ipa[2] == 0 || ipa[2] == 1 ) ips++;
			     }
		           }
		           END   { exit ips }'`
    # awk exits with the candidate count, harvested here from $?
    # (NB: shells truncate exit statuses to 0..255).
    NUM_OF_ITEMS=$?
    # The first address flagged "OK" becomes the suggested default.
    DEFAULT_IP=`echo "${LIST_TO_DISPLAY}" | awk '/OK/ {print $1; exit 0}'`

    if [ "${DEFAULT_IP}" = "" ]
    then
        # server is not known or reachable; warn and let the operator
        # either abort or type an IP address manually
        oi_warning "opc" 644 ${OPC_MGMT_SV} ${OPC_NODE}
        oi_prompt_and_ask opc 643 $OPC_MGMT_SV "" \
    			      638 $OPC_MGMT_SV $OPC_NODE "" 
        if [ "$OPC_ANSWER" = $OPC_OIANSWER_DEF -o \
             "$OPC_ANSWER" = "" ]
        then
	    if [ ${NUM_OF_ITEMS} -eq 0 ]
	    then
	        # server is not known 
	        oi_error "opc" 188 ${OPC_MGMT_SV} ${OPC_NODE}
	    else
	        # server is known but not reachable
	        oi_error "opc" 640 ${OPC_MGMT_SV} ${OPC_NODE}
	    fi
	    RET_VAL=1
        else
	    # operator supplied an address; keep only its first word
	    RET_VAL=0
	    SELECTED_IP=`echo "$OPC_ANSWER" | awk '{print $1}'`
	    oi_note "opc" 707 ${OPC_MGMT_SV} "${SELECTED_IP}"
        fi
    else
        if [ ${NUM_OF_ITEMS} -ne 1 ]
        then
	    # more than one server IP address available on Managed Node:
	    # keep prompting until a valid entry from the list is selected
	    SELECTED_IP=""
	    while [ "$SELECTED_IP" = "" ]
	    do
	        #IP address of Management Server available on this Node
	        oi_s_echo "" opc 578 ${OPC_NODE}
	        echo "${LIST_TO_DISPLAY}"
	        oi_prompt_and_ask opc 639 $OPC_MGMT_SV $OPC_NODE \
				      638 $OPC_MGMT_SV $OPC_NODE $DEFAULT_IP
	        if [ "$OPC_ANSWER" = $OPC_OIANSWER_DEF ]
	        then
		    OPC_ANSWER="$DEFAULT_IP"
	        else
		    OPC_ANSWER=`echo "$OPC_ANSWER" | awk '{print $1}'`
		    oi_note "opc" 707 ${OPC_MGMT_SV} "${OPC_ANSWER}"
	        fi
	        # accept the answer only if it matches a non-FAIL list
	        # entry and is not one of the status words themselves
	        IN_LIST=`echo "$LIST_TO_DISPLAY" | grep -v "FAIL" | \
	    	     grep -c "$OPC_ANSWER"`
	        if [ "$IN_LIST" = "0" -o \
	    	     "$OPC_ANSWER" = "OK" -o "$OPC_ANSWER" = "FAIL" ]
	        then
		    oi_s_echo "          " opc 573 $OPC_ANSWER
	        else
	    	    SELECTED_IP="$OPC_ANSWER"
	        fi
	    done
    	    RET_VAL=0
        else
	    # one IP and good one too
	    SELECTED_IP="VOID"
    	    RET_VAL=0
        fi
    fi

    OPC_RESOLVED_IP="${SELECTED_IP}"
    return ${RET_VAL}
    
}

#===============================================================================
#
#   MAIN script body
#
#   NOTE(review): helpers such as EXIT, oi_error, produce_work and the
#   OPC_* environment variables are provided by the calling installation
#   framework -- they are not defined in this file.
#

# display start module
if [ "${OPC_ADEBUG}" = "OPCAYES" ]
then
    # Suspend xtrace while doing bookkeeping; DEBUG_CNT counts the
    # nesting depth and xtrace is conditionally restored further below.
    set +x
    DEBUG_CNT=`expr $DEBUG_CNT + 1`
fi
if [ "${OPC_TRACE}" = "YES" ]
then
    CALLOUT_NAME=$0
    CALLOUT_PARAMS="$*"
    #oi_trace opc 588 "${MODULE_NAME}" ${CALLOUT_NAME} "\"${CALLOUT_PARAMS}\""
    # Fill the MODULxxxxxxx / SCRIPTxxxxxx placeholders of the trace
    # header template and append the line to the trace file.
    echo "TRACE:    ${TRACE_HEADER}" | \
    sed -e "s=MODULxxxxxxx=${MODULE_NAME}=" \
        -e "s=SCRIPTxxxxxx=${CALLOUT_NAME}=" \
    >> ${OPC_TRACE_FILE}
fi
if [ "${OPC_DEBUG}" = "YES" ]
then
    # enable tracing in all functions
    eval `typeset -f | awk '/^function/ { printf "typeset -tf %s\n", $2 }'`
fi
if [ "${OPC_ADEBUG}" = "OPCAYES" ]
then
    if [ ${DEBUG_CNT} -gt 0 ]
    then
	DEBUG_CNT=`expr $DEBUG_CNT - 1`
    fi
    # Re-enable xtrace only when still inside a nested debug scope.
    if [ ${DEBUG_CNT} -gt 0 ]
    then
	set -x
    fi
fi

#   set global environment variables
#
REQ_FILE=${OPC_INST_DIR}/${OPC_PLATFORM}/require.dat
# $$ makes the temporary file name unique per process
TMP_FILE=${OPC_TOPC}/opc-$$.tmp

#   initialize OPC_NODE environment variables
#
OPC_STATUS="NOTREADY"
OPC_OVERRIDE=0
OPC_PREV_VERS=""
export OPC_STATUS OPC_OVERRIDE OPC_PREV_VERS
OPC_TAPE_DIR=${OPC_TMP_DIR}

# Flag used by mk_tmp_dir/rm_tmp_dir to remember whether this script
# created the temporary directory itself.
OPC_TMP_CREATED=0

#   initialize OPC_NODE's cluster server environment variables
#
OPC_CL_NODE=""
OPC_CL_IP_ADDRESS=""
OPC_CL_PASSWD_REQ=0
OPC_CL_PASSWD=""
export OPC_CL_NODE OPC_CL_IP_ADDRESS OPC_CL_PASSWD_REQ OPC_CL_PASSWD

#   input parameters: exactly one argument (the work/output file) expected
#
if [ $# -ne 1 ]
then
    #	usage error
    #
    oi_error "opc" 511 "opcuxprlg"
    EXIT 1
fi
OPC_OUTPUT_FILE=$1

OUTPUT_F=${OPC_TOPC}/output.data                 # tmp filename

#   1. create OPC_TMP_DIR directory on Managed Node
#
mk_tmp_dir
if [ $? -eq 1 ]
then
    # directory could not be created: record the node state and skip it
    produce_work 
    EXIT 1
fi

#   2. check OS of Managed Node.
#
if [ -z "${OPC_OS_CHECK}" ]
then
    #	remote copy and execute callout script is unknown
    #	exit with an error
    #
    oi_error "opc" 522 "OS_CHECK" "${OPC_PLATFORM}"
    produce_work 
    EXIT 1
fi

# The OS_CHECK callout writes the detected OS version into TMP_FILE.
$OPC_OS_CHECK ${TMP_FILE}
if [ $? -eq 1 ]
then
    #	invalid OS
    #
    produce_work 
    EXIT 1
fi

#   check if OS version is supported by Agent software installed on Management
#   Server
#   

OPC_OS_VERSION="`cat ${TMP_FILE}`"
chk_os_support `cat ${TMP_FILE}`
if [ $? -eq 1 ]
then
    # OS version not supported: skip this node
    produce_work 
    EXIT 1
fi

#VMS Changes Starts
#   3. check Managed Node kind.
#
#   NOTE(review): the whole cluster handling below (steps 3 through 6) is
#   commented out for the VMS platform; the node is always treated as a
#   stand-alone SERVER and the CLIENT code paths never run.
#
#if [ -z "${OPC_NODE_CHECK}" ]
#then
    #	remote copy and execute callout script is unknown
    #	exit with an error
    #
#    oi_error "opc" 522 "NODE_CHECK" "${OPC_PLATFORM}"
#    EXIT 1
#fi

#$OPC_NODE_CHECK ${TMP_FILE}
#if [ $? -eq 1 ]
#then
    #	invalid node kind
    #
#    produce_work 
#    EXIT 1
#fi

#OPC_CL_NODE=`cat ${TMP_FILE}`; export OPC_CL_NODE

#if [ -z "${OPC_CL_NODE}" ]
#then 
    OPC_NODE_KIND="SERVER"
#else
    #	try to find correct cluster server password
    #
#    get_pwd_cluster
    #	ask for confirmation of installation which will be
    #	performed on cluster server system too
    #
#    ask_for_cluster
#    if [ $? -ne 0 ]
#    then
    	#   user did not confirm continuation of installation 
	#   on cluster server system
    	#
#	produce_work 
#	EXIT 1
#    fi
#    OPC_NODE_KIND="CLIENT"
#fi
export OPC_NODE_KIND

#if [ "${OPC_NODE_KIND}" = "CLIENT" ]
#then 
    #   remove OPC_TMP_DIR directory on Managed Node
    #
#    rm_tmp_dir
    
    #	4. Managed Node is cluster client system
    #   switch environment to cluster server system
    #
#    opcuxenvsave SAVE

    #	5. check password for cluster server system.
    #
#    opcvmspwget
#    if [ $? -eq 1 ]
#    then
	#	invalid password specified; skip this node
	#
#	oi_error "opc" 165 ${OPC_NODE}
#	opcuxenvsave RESTORE
#	produce_work 
#	EXIT 1
#    fi

    #   create OPC_TMP_DIR directory on Managed Node's cluster server
    #
#    mk_tmp_dir
#    if [ $? -eq 1 ]
#    then
#	opcuxenvsave RESTORE
#	produce_work 
#	EXIT 1
#    fi

    #	6. check Managed Node's cluster server type.
    #
#    if [ -z "${OPC_CLUSTER_CHECK}" ]
#    then
	#	remote copy and execute callout script is unknown
	#	exit with an error
	#
#	oi_error "opc" 522 "CLUSTER_CHECK" "${OPC_PLATFORM}"
#	EXIT 1
#    fi

#    $OPC_CLUSTER_CHECK
#    if [ $? -eq 1 ]
#    then
	#   cluster server test failed
	#
#	oi_error "opc" 039 ${OPC_PLATFORM} ${OPC_NODE}
#	opcuxenvsave RESTORE
#	produce_work 
#	EXIT 1
#    fi

#fi
# Always perform full (de)installation processing on VMS.
ACTIVATE_ONLY=0
#VMS Changes ends

# ACTIVATE_ONLY is hard-coded to 0 above, so this branch always runs here.
if [ "${ACTIVATE_ONLY}" = "0" ]
then 

    #   This is (de)installation
    #   7. check ITO software version.
    #
    if [ -z "${OPC_VERSION_CHECK}" ]
    then
	#	remote copy and execute callout script is unknown
	#	exit with an error
	#
	oi_error "opc" 522 "VERSION_CHECK" "${OPC_PLATFORM}"
	EXIT 1
    fi

    $OPC_VERSION_CHECK ${TMP_FILE}
    RET_VALUE=$?
    # BBC installation...
    # check if user aborted installation
    if [ ${RET_VALUE} -eq 1 -a "${OPC_METHOD}" = "INSTALL" ]
    then
      # stop operation
      produce_work
      EXIT 1
    fi

    if [ "${OPC_METHOD}" = "INSTALL" ]
    then 
	#   8. check disks space on Managed node.
	#
	get_sw_size

	# SPACE_CHECK callout writes the chosen tape directory to TMP_FILE.
	$OPC_SPACE_CHECK ${TAPE_SIZE} ${SW_SIZE} ${TMP_FILE}
	if [ $? -eq 1 ]
	then
	    #   not enough free disk space
	    #
	    if [ "${OPC_NODE_KIND}" = "CLIENT" ]
	    then
		opcuxenvsave RESTORE
	    fi
	    produce_work 
	    EXIT 1
	fi
	OPC_TAPE_DIR=`cat ${TMP_FILE}`; export OPC_TAPE_DIR

	# add OPC_NODE in list of all nodes on which ITO package will be 
	# installed during this run of installation script.
	#
	add_previous_node
    fi

fi

# With cluster support disabled above, OPC_NODE_KIND is always "SERVER"
# here, so this CLIENT branch is effectively dead on VMS.
if [ "${OPC_NODE_KIND}" = "CLIENT" ]
then
    #	9. restore environment variables, that right values will be displayed
    #
    opcuxenvsave RESTORE
fi

#   10. all tests ok - produce full work_file line
#
OPC_STATUS="READY"
produce_work

#   remove temporary file
#
rm -f ${TMP_FILE}
EXIT 0

################################################################################
# end of opcvmsprlg
################################################################################

