#!/bin/bash
#####
# esg-node: ESGF Node Application Stack
# chkconfig: 345 98 02
# description: Installer for the ESGF Node application stack
#
#****************************************************************************
#*                                                                          *
#*  Organization: Earth System Grid Federation                              *
#*  Project: Earth Systems Grid Fed (ESGF) Node Software Stack              *
#*  First Author: Gavin M. Bell (gavin@llnl.gov)                            *
#*                                                                          *
#****************************************************************************
#*                                                                          *
#*  Copyright (c) 2009, Lawrence Livermore National Security, LLC.          *
#*  Produced at the Lawrence Livermore National Laboratory                  *
#*  Written by: Gavin M. Bell (gavin@llnl.gov)                              *
#*  LLNL-CODE-420962                                                        *
#*                                                                          *
#*  All rights reserved. This file is part of the:                          *
#*  Earth System Grid Fed (ESGF) Node Software Stack, Version 1.0          *
#*                                                                          *
#*  For details, see http://esgf.llnl.gov                                   *
#*  Please also read this link                                              *
#*  http://esgf.llnl.gov/LICENSE                                            *
#*                                                                          *
#*  * Redistribution and use in source and binary forms, with or            *
#*    without modification, are permitted provided that the following       *
#*    conditions are met:                                                   *
#*                                                                          *
#*  * Redistributions of source code must retain the above copyright        *
#*    notice, this list of conditions and the disclaimer below.             *
#*                                                                          *
#*  * Redistributions in binary form must reproduce the above copyright     *
#*    notice, this list of conditions and the disclaimer (as noted below)   *
#*    in the documentation and/or other materials provided with the         *
#*    distribution.                                                         *
#*                                                                          *
#*  Neither the name of the LLNS/LLNL nor the names of its contributors     *
#*  may be used to endorse or promote products derived from this            *
#*  software without specific prior written permission.                     *
#*                                                                          *
#*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS     *
#*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT       *
#*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS       *
#*  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE     *
#*  LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR      *
#*  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,            *
#*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT        *
#*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF        *
#*  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND     *
#*  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,      *
#*  OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT      *
#*  OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF      *
#*  SUCH DAMAGE.                                                            *
#*                                                                          *
#****************************************************************************
#####
#uses: perl, awk, ifconfig, tar, wget, curl, su, useradd, groupadd,
#      id, chmod, chown, chgrp, cut, svn, mkdir, killall, java, egrep,
#      lsof, unlink, ln, pax, keytool, openssl, getent
#note: usage of readlink not macosx friendly :-( usage of useradd /
#      groupadd is RedHat/CentOS dependent :-(
#note on getting bash version...
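#
#(added, hedged illustration - not part of the original installer and never invoked):
#a minimal sketch of how the external tools listed in the "uses:" note above could be
#verified up front; the function name and the tool subset checked here are assumptions
#made only for illustration.
_illustrate_check_required_tools() {
    local tool missing=0
    for tool in perl awk tar wget curl openssl keytool getent; do
        command -v "${tool}" >/dev/null 2>&1 || { echo "WARNING: required tool not found: ${tool}"; ((missing++)); }
    done
    return ${missing}
}
#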
#bash --version | head -n +1 | sed -n -e 's/.*version[ ]*\([^(-]*\).*/\1/p'

export LANG=POSIX
umask 022

DEBUG=${DEBUG:-0}
VERBOSE=${VERBOSE:-0}
devel=${devel:-0}
recommended=1
custom=0
use_local_files=0

progname="esg-node"
script_version="v2.5-RC5.4.0-devel"
script_maj_version="2.5"
script_release="2.5.13-devel"
envfile="/etc/esg.env"

#--------------
#User Defined / Settable (public)
#--------------
install_prefix=${install_prefix:-${ESGF_INSTALL_PREFIX:-"/usr/local"}}
#--------------

#--------------
#Script vars - do not edit
#--------------
esg_functions_file="${install_prefix}/bin/esg-functions"
esg_init_file="${install_prefix}/bin/esg-init"
#--------------
esgf_nm_funcs_file="${install_prefix}/bin/esgf-nm-func"

export UVCDAT_ANONYMOUS_LOG=False

[ -e "${envfile}" ] && source ${envfile} && ((VERBOSE)) && printf "sourcing environment from: ${envfile} \n"

init_structure() {

    #--------------
    #Prepare necessary support filesystem structure and files
    #--------------
    (($DEBUG)) && echo "init_structure: esg_dist_url = ${esg_dist_url}"

    #--------------
    #Let's go down the line and make sure that we have what we need structurally on the filesystem
    local config_check=8
    if [ ! -e ${scripts_dir} ]; then mkdir -p ${scripts_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${esg_backup_dir} ]; then mkdir -p ${esg_backup_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${esg_tools_dir} ]; then mkdir -p ${esg_tools_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${esg_log_dir} ]; then mkdir -p ${esg_log_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${esg_config_dir} ]; then mkdir -p ${esg_config_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${esg_etc_dir} ]; then mkdir -p ${esg_etc_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${tomcat_conf_dir} ]; then mkdir -p ${tomcat_conf_dir} && ((config_check--)); else ((config_check--)); fi
    if [ ! -e ${config_file} ]; then touch ${config_file} && ((config_check--)); else ((config_check--)); fi
    debug_print ${config_check}
    ((config_check != 0 )) && echo "ERROR: checklist incomplete $([FAIL])" && checked_done 1 || verbose_print "checklist $([OK])"
    #--------------

    chmod 777 ${esg_etc_dir} 2> /dev/null

    [ -w "${envfile}" ] && write_paths

    #--------------
    #Setup variables....
    #--------------

    check_for_my_ip

    esgf_host=${esgf_host}
    [ -z "${esgf_host}" ] && get_property esgf_host

    esgf_default_peer=${esgf_default_peer}
    [ -z "${esgf_default_peer}" ] && get_property esgf_default_peer

    esgf_idp_peer_name=${esgf_idp_peer_name}
    [ -z "${esgf_idp_peer_name}" ] && get_property esgf_idp_peer_name

    esgf_idp_peer=${esgf_idp_peer}
    [ -z "${esgf_idp_peer}" ] && get_property esgf_idp_peer
    myproxy_endpoint=${esgf_idp_peer%%/*}

    [ -z "${myproxy_port}" ] && get_property myproxy_port
    myproxy_port=${myproxy_port:-7512}

    esg_root_id=${esg_root_id}
    [ -z "${esg_root_id}" ] && get_property esg_root_id

    node_peer_group=${node_peer_group}
    [ -z "${node_peer_group}" ] && get_property node_peer_group

    [ -z "${node_short_name}" ] && get_property node_short_name

    #NOTE: Calls to get_property must be made AFTER we touch the file ${config_file} to make sure it exists
    #this is actually an issue with dedup_properties that gets called in the get_property function

    #Get the distinguished name from environment... if not, then esgf.properties... and finally this can be overwritten by the --dname option
    #Here node_dn is written in the /XX=yy/AAA=bb (macro->micro) scheme.
#We transform it to dname which is written in the java style AAA=bb, XX=yy (micro->macro) scheme using "standard2java_dn" function dname=${dname} [ -z "${dname}" ] && get_property node_dn && dname=$(standard2java_dn ${node_dn}) gridftp_config=${gridftp_config} [ -z "${gridftp_config}" ] && get_property gridftp_config "bdm end-user" publisher_config=${publisher_config} [ -z "${publisher_config}" ] && get_property publisher_config "esg.ini" publisher_home=${publisher_home} [ -z "${publisher_home}" ] && get_property publisher_home ${esg_config_dir}/esgcet # Sites can override default keystore_alias in esgf.properties (keystore.alias=) get_property keystore_alias ${keystore_alias} export ESGINI=${publisher_home}/${publisher_config} ((DEBUG)) && echo "ESGINI = ${ESGINI}" return 0 } write_paths() { ((show_summary_latch++)) echo "export ESGF_HOME=${esg_root_dir}" >> ${envfile} echo "export ESG_USER_HOME=${installer_home}" >> ${envfile} echo "export ESGF_INSTALL_WORKDIR=${workdir}" >> ${envfile} echo "export ESGF_INSTALL_PREFIX=${install_prefix}" >> ${envfile} echo "export PATH=$myPATH:\$PATH" >> ${envfile} echo "export LD_LIBRARY_PATH=$myLD_LIBRARY_PATH:\$LD_LIBRARY_PATH" >> ${envfile} dedup ${envfile} && source ${envfile} } #checking for what we expect to be on the system a-priori #that we are not going to install or be responsible for check_prerequisites() { printf " \033[01;31m EEEEEEEEEEEEEEEEEEEEEE SSSSSSSSSSSSSSS GGGGGGGGGGGGGFFFFFFFFFFFFFFFFFFFFFF E::::::::::::::::::::E SS:::::::::::::::S GGG::::::::::::GF::::::::::::::::::::F E::::::::::::::::::::ES:::::SSSSSS::::::S GG:::::::::::::::GF::::::::::::::::::::F EE::::::EEEEEEEEE::::ES:::::S SSSSSSS G:::::GGGGGGGG::::GFF::::::FFFFFFFFF::::F E:::::E EEEEEES:::::S G:::::G GGGGGG F:::::F FFFFFF\033[0m \033[01;33m E:::::E S:::::S G:::::G F:::::F E::::::EEEEEEEEEE S::::SSSS G:::::G F::::::FFFFFFFFFF E:::::::::::::::E SS::::::SSSSS G:::::G GGGGGGGGGG F:::::::::::::::F E:::::::::::::::E SSS::::::::SS G:::::G G::::::::G F:::::::::::::::F E::::::EEEEEEEEEE SSSSSS::::S G:::::G GGGGG::::G F::::::FFFFFFFFFF\033[0m \033[01;32m E:::::E S:::::SG:::::G G::::G F:::::F E:::::E EEEEEE S:::::S G:::::G G::::G F:::::F EE::::::EEEEEEEE:::::ESSSSSSS S:::::S G:::::GGGGGGGG::::GFF:::::::FF E::::::::::::::::::::ES::::::SSSSSS:::::S GG:::::::::::::::GF::::::::FF E::::::::::::::::::::ES:::::::::::::::SS GGG::::::GGG:::GF::::::::FF EEEEEEEEEEEEEEEEEEEEEE SSSSSSSSSSSSSSS GGGGGG GGGGFFFFFFFFFFF.llnl.gov \033[0m " printf "Checking that you have root privs on $(hostname)... " id | grep root >& /dev/null [ $? != 0 ] && printf "$([FAIL]) \n\tMust run this program with root's effective UID\n\n" && return 1 [OK] #---------------------------------------- echo "Checking requisites... " # checking for OS, architecture, distribution and version OS=`uname -s` MACH=`uname -m` if [ "${OS}" = "Linux" ] && [ "${MACH}" = "x86_64" ] && [ -f /etc/redhat-release ] ; then MAJREV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*// | sed s/[.].*//` fi if [ "${MAJREV}" = 6 ] ; then DIST=`cat /etc/redhat-release | sed s/release.*//` DISTRIB=`echo $DIST` fi if [[ -z $DISTRIB ]] ; then echo "ESGF can only be installed on versions 6 of Red Hat, CentOS or Scientific Linux x86_64 systems" && exit 1 ; fi #---------------------------------------- echo return 0 } ##### # Java ##### setup_java() { echo -n "Checking for java >= ${java_min_version} and valid JAVA_HOME... " [ -e ${java_install_dir} ] && check_version $java_install_dir/bin/java ${java_min_version} [ $? == 0 ] && (( ! 
force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up Java... ${java_version}" echo "*******************************" echo local last_java_truststore_file local default="Y" ((force_install)) && default="N" local dosetup if [ -x ${java_install_dir}/bin/java ]; then echo "Detected an existing java installation..." read -e -p "Do you want to continue with Java installation and setup? $([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup [ -z ${dosetup} ] && dosetup=${default} if [ "${dosetup}" != "Y" ] && [ "${dosetup}" != "y" ]; then echo "Skipping Java installation and setup - will assume Java is setup properly" return 0 fi last_java_truststore_file=$(readlink -f ${truststore_file}) echo fi mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} #>& /dev/null local java_dist_file=${java_dist_url##*/} #strip off -(32|64).tar.gz at the end java_dist_dir=$(echo ${java_dist_file} | awk 'gsub(/-(32|64)('$compress_extensions')/,"")') #Check to see if we have an Java distribution directory if [ ! -e ${java_install_dir%/*}/${java_dist_dir} ]; then echo "Don't see java distribution dir ${java_install_dir%/*}/${java_dist_dir}" if [ ! -e ${java_dist_file} ]; then echo "Don't see java distribution file $(pwd)/${java_dist_file} either" echo "Downloading Java from ${java_dist_url}" checked_get ${java_dist_file} ${java_dist_url} $((force_install)) [ $? != 0 ] && echo " ERROR: Could not download Java" && popd && checked_done 1 echo "unpacking ${java_dist_file}..." tar xzf ${java_dist_file} -C ${java_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Java" && popd && checked_done 1 fi fi #If you don't see the directory but see the tar.gz distribution #then expand it if [ -e ${java_dist_file} ] && [ ! -e ${java_install_dir%/*}/${java_dist_dir} ]; then echo "unpacking ${java_dist_file}..." tar xzf ${java_dist_file} -C ${java_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Java..." && popd && checked_done 1 fi if [ ! -e ${java_install_dir} ]; then ln -s ${java_install_dir%/*}/${java_dist_dir} ${java_install_dir} [ $? != 0 ] && \ echo " ERROR: Could not create sym link ${java_install_dir%/*}/${java_dist_dir} -> ${java_install_dir}" && popd && checked_done 1 else unlink ${java_install_dir} [ $? != 0 ] && mv ${java_install_dir} ${java_install_dir}.$(date ${date_format}).bak ln -s ${java_install_dir%/*}/${java_dist_dir} ${java_install_dir} [ $? != 0 ] && \ echo " ERROR*: Could not create sym link ${java_install_dir%/*}/${java_dist_dir} -> ${java_install_dir}" && popd && checked_done 1 fi debug_print "chown -R ${installer_uid}:${installer_gid} ${java_install_dir}" chown ${installer_uid}:${installer_gid} ${java_install_dir} chown -R ${installer_uid}:${installer_gid} $(readlink -f ${java_install_dir}) popd >& /dev/null ${java_install_dir}/bin/java -version [ $? != 0 ] && echo "ERROR cannot run ${java_install_dir}/bin/java" && checked_done 1 write_java_env write_java_install_log #----------------------------- #In the situation where this function is called under update #semantics i.e. there is already a previous installation of java #and installation of tomcat with tomcat setup with a properly #generated/configured jssecacerts file and there is a valid #ESGF_IDP_PEER being pointed to. We should copy over that #jssecacerts into this newly installed VM to satisfy SSL. 
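    #(added note, hedged): once copied below, the migrated truststore can be sanity-checked
    #with keytool, e.g. (the store password is site-specific; "changeit" is only a common default):
    #  ${java_install_dir}/bin/keytool -list \
    #      -keystore ${java_install_dir}/conf/${last_java_truststore_file##*/} -storepass changeit | head -n 5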
if [ -n "${last_java_truststore_file}" ] && [ -e "${last_java_truststore_file}" ]; then mkdir -p ${java_install_dir}/conf cp -v ${last_java_truststore_file} ${java_install_dir}/conf chmod 644 ${java_install_dir}/conf/${last_java_truststore_file##*/} fi #----------------------------- checked_done 0 } write_java_env() { ((show_summary_latch++)) echo "export JAVA_HOME=${java_install_dir}" >> ${envfile} prefix_to_path PATH ${java_install_dir}/bin >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_java_install_log() { echo "$(date ${date_format}) java=${java_version} ${java_install_dir%/*}/${java_dist_dir}" >> ${install_manifest} dedup ${install_manifest} return 0 } ##### # Ant ##### setup_ant() { echo -n "Checking for ant >= ${ant_min_version} " [ -e ${ant_install_dir} ] && check_version ${ant_install_dir}/bin/ant ${ant_min_version} [ $? == 0 ] && (( ! force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up Ant... ${ant_version}" echo "*******************************" echo local default="Y" ((force_install)) && default="N" local dosetup if [ -x ${ant_install_dir}/bin/ant ]; then echo "Detected an existing ant installation..." read -e -p "Do you want to continue with Ant installation and setup? $([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup [ -z ${dosetup} ] && dosetup=${default} if [ "${dosetup}" != "Y" ] && [ "${dosetup}" != "y" ]; then echo "Skipping Ant installation and setup - will assume ant is setup properly" return 0 fi echo fi mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null local ant_dist_file=${ant_dist_url##*/} #strip off -bin.tar.gz at the end ant_dist_dir=${ant_dist_file/-bin.tar.gz} #There is this pesky case of having a zero sized dist file... if [ -e ${ant_dist_file} ]; then ls -l ${ant_dist_file} local size=$(stat -c%s ${ant_dist_file}) (( size == 0 )) && rm -v ${ant_dist_file} fi #Check to see if we have an Ant distribution directory if [ ! -e ${ant_install_dir%/*}/${ant_dist_dir} ]; then echo "Don't see ant distribution dir ${ant_install_dir%/*}/${ant_dist_dir}" if [ ! -e ${ant_dist_file} ]; then echo "Don't see ant distribution file $(pwd)/${ant_dist_file} either" echo "Downloading Ant from ${ant_dist_url}" wget -O ${ant_dist_file} ${ant_dist_url} [ $? != 0 ] && echo " ERROR: Could not download Ant" && popd && checked_done 1 echo "unpacking ${ant_dist_file}..." tar xzf ${ant_dist_file} -C ${ant_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Ant" && popd && checked_done 1 fi fi #If you don't see the directory but see the tar.gz distribution #then expand it if [ -e ${ant_dist_file} ] && [ ! -e ${ant_install_dir%/*}/${ant_dist_dir} ]; then echo "unpacking ${ant_dist_file}..." tar xzf ${ant_dist_file} -C ${ant_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Ant..." && popd && checked_done 1 fi if [ ! -e ${ant_install_dir} ]; then ln -s ${ant_install_dir%/*}/${ant_dist_dir} ${ant_install_dir} [ $? != 0 ] && \ echo " ERROR: Could not create sym link ${ant_install_dir%/*}/${ant_dist_dir} -> ${ant_install_dir}" && popd && checked_done 1 else unlink ${ant_install_dir} [ $? != 0 ] && mv ${ant_install_dir} ${ant_install_dir}.$(date ${date_format}).bak ln -s ${ant_install_dir%/*}/${ant_dist_dir} ${ant_install_dir} [ $? != 0 ] && \ echo " ERROR: Could not create sym link ${ant_install_dir%/*}/${ant_dist_dir} -> ${ant_install_dir}" && popd && checked_done 1 fi ${ant_install_dir}/bin/ant -version [ $? 
!= 0 ] && echo "ERROR cannot run ${ant_install_dir}/bin/ant" && checked_done 1 write_ant_env write_ant_install_log checked_done 0 } write_ant_env() { ((show_summary_latch++)) echo "export ANT_HOME=${ant_install_dir}" >> ${envfile} prefix_to_path PATH ${ant_install_dir}/bin >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_ant_install_log() { echo "$(date ${date_format}) ant=${ant_version} $(readlink -f ${ant_install_dir})" >> ${install_manifest} dedup ${install_manifest} return 0 } ##### # PostgreSQL ##### setup_postgress() { debug_print "DEBUG: entering setup_postgress()" _is_managed_db && return 0 echo -n "Checking for postgresql >= ${postgress_min_version} " check_version ${postgress_bin_dir}/postgres ${postgress_min_version} local ret=$? if [ $ret == 0 ] ; then (( ! force_install )) && [OK] && return 0 fi local upgrade=$(( (ret==1) ? 1 : 0 )) #see check_version() function comments for meaning of return values #--------------------------------------- #Setup PostgreSQL RPM repository #--------------------------------------- echo echo "*****************************" echo "Setting PostgreSQL Server RPM" echo "*****************************" echo yum -y install postgresql postgresql-server postgresql-devel [ $? != 0 ] && printf "$([FAIL]) \n\tCould not install or update postgresql\n\n" && return 1 local default="Y" ((force_install)) && default="N" local dosetup if [ -x ${postgress_install_dir}/bin/psql ]; then echo "Detected an existing postgress installation... (will not re-install)" echo "To force a re-install you must manually remove the database completely - remove ${postgress_install_dir}/bin/psql" echo 'Only if you know what you are doing!!!!!' echo read -e -p "Do you want to backup the curent database? $([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup dosetup=$(echo "${dosetup}" | tr [a-z] [A-Z]) [ -z ${dosetup} ] && dosetup=${default} if [ "${dosetup}" = "Y" ] || [ "${dosetup}" = "YES" ]; then backup_db fi echo return 0 fi if ((upgrade)); then echo "UPGRADE of postgress completed" echo "It is recommended to restart the entire node as to not leave" echo "orphaned db connections that have been created by the rest of" echo "the application stack" echo "Restarting Database..." stop_postgress start_postgress checked_done $? fi ######## #Create the system account for postgress to run as. ######## local pg_sys_acct_homedir="/var/lib/pgsql" id $pg_sys_acct if [ $? != 0 ]; then echo " Hmmm...: There is no postgres system account user \"$pg_sys_acct\" present on system, making one..." #NOTE: "useradd/groupadd" are a RedHat/CentOS thing... to make this cross distro compatible clean this up. if [ ! $(getent group ${pg_sys_acct_group}) ]; then /usr/sbin/groupadd -r ${pg_sys_acct_group} [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: Could not add postgres system group: ${pg_sys_acct_group}" && popd && checked_done 1 fi if [ -z "${pg_sys_acct_passwd}" ]; then #set the password for the system user... while [ 1 ]; do local input read -e -s -p "Create password for postgress system account: " input [ -n "${input}" ] && pg_sys_acct_passwd=${input} && unset input && break done fi echo "Creating account..." /usr/sbin/useradd -r -c"PostgreSQL Service ESGF" -d $pg_sys_acct_homedir -g $pg_sys_acct_group -p $pg_sys_acct_passwd -s /bin/bash $pg_sys_acct [ $? != 0 ] && [ $? 
!= 9 ] && echo "ERROR: Could not add postgres system account user" && popd && checked_done 1 echo "${pg_sys_acct_passwd}" > ${pg_secret_file} else #check that this existing postgres account has a shell (required) local postgress_user_shell="$(sed -n 's#\('${pg_sys_acct}'.*:\)\(.*\)$#\2#p' /etc/passwd)" if [ "${postgress_user_shell}" != "/bin/bash" ]; then echo "Noticed that the existing postgres user [${pg_sys_acct}] does not have the bash shell... Hmmm... making it so " sed -i 's#\('${pg_sys_acct}'.*:\)\(.*\)$#\1/\bin/\bash#' /etc/passwd echo "grep '${pg_sys_acct}': /etc/passwd" [ "$(sed -n 's#\('${pg_sys_acct}'.*:\)\(.*\)$#\2#p' /etc/passwd)" = "/bin/bash" ] && [OK] || [FAIL] fi fi [ -e "${pg_secret_file}" ] && chmod 640 ${pg_secret_file} && chown ${installer_uid}:${tomcat_group} ${pg_secret_file} ######## sleep 3 #double check that the account is really there! echo id $pg_sys_acct >& /dev/null [ $? != 0 ] && grep $pg_sys_acct /etc/passwd && echo " ERROR: Problem with $pg_sys_acct creation!!!" && checked_done 1 chown -R $pg_sys_acct $postgress_install_dir chgrp -R $pg_sys_acct_group $postgress_install_dir #Create the database: mkdir -p ${postgress_install_dir}/data chown -R ${pg_sys_acct} ${postgress_install_dir}/data [ $? != 0 ] && " ERROR: Could not change ownership of postgres' data to \"$pg_sys_acct\" user" && popd && checked_done 1 chmod 700 $postgress_install_dir/data su $pg_sys_acct -c "$postgress_bin_dir/initdb -D $postgress_install_dir/data" mkdir $postgress_install_dir/log chown -R $pg_sys_acct $postgress_install_dir/log [ $? != 0 ] && " ERROR: Could not change ownership of postgres' log to \"$pg_sys_acct\" user" && popd && checked_done 1 #Start the database start_postgress #Check to see if there is a ${postgress_user} already on the system if not, make one if [ ! -x ${postgress_bin_dir}/psql ] ; then echo " ERROR: psql not found after install!" && checked_done 1 fi if (( $(PGPASSWORD=${pg_sys_acct_passwd:=${security_admin_password}} psql -U postgres -c "select count(*) from pg_roles where rolname='${postgress_user}'" postgres | tail -n +3 | head -n 1) > 0 )); then echo "${postgress_user} exists!! :-)"; else while [ 1 ]; do while [ 1 ]; do echo "Enter password for postgres user $postgress_user: "; read -s p1; echo "Re-enter password for postgres user $postgress_user: "; read -s p2; if [ "$p1" != "$p2" ]; then echo "The passwords did not tally. Enter same password twice"; continue; else break; fi done sudo -u $pg_sys_acct $postgress_bin_dir/psql -c "create user $postgress_user with superuser password '$p1';" if [ $? -eq 0 ]; then popd && checked_done 0; break; fi done fi #stop_postgress && return 1 #See trap in 'main'... that is who calls this. local fetch_file cd $postgress_install_dir/data #Get files fetch_file=pg_hba.conf checked_get ./${fetch_file} ${esg_dist_url}/externals/bootstrap/${fetch_file} $((force_install)) (( $? > 1 )) && popd && checked_done 1 chmod 600 ${fetch_file} #Get File... fetch_file=postgresql.conf checked_get ./${fetch_file} ${esg_dist_url}/externals/bootstrap/${fetch_file} $((force_install)) (( $? > 1 )) && popd && checked_done 1 chmod 600 ${fetch_file} #----- #NOTE: This database is an internal database to this esg #application stack... I don't think it would even be prudent to #offer then opportunity for someone to bind to the public #interface. If they choose to do so after the fact, then they are #making that conscious decision, but I won't make it a part of #this process. #@@postgress_host@@ #Token in file... 
#local input #read -e -p "Please Enter the IP address or name of this host [${postgress_host}]:> " input #[ ! -z "${input}" ] && postgress_host=${input} #printf "\nUsing IP: ${postgress_host}\n" #eval "perl -p -i -e 's/\\@\\@postgress_host\\@\\@/${postgress_host}/g' ${fetch_file}" #----- #@@postgress_port@@ #Token in file... unset input read -e -p "Please Enter PostgreSQL port number [${postgress_port}]:> " input [ ! -z "${input}" ] && postgress_port=${input} printf "\nSetting Postgress Port: ${postgress_port} " eval "perl -p -i -e 's/\\@\\@postgress_port\\@\\@/${postgress_port}/g' ${fetch_file}" [ $? == 0 ] && [OK] || [FAIL] printf "Setting Postgress Log Dir: ${postgress_install_dir} " eval "perl -p -i -e 's#\\@\\@postgress_install_dir\\@\\@#${postgress_install_dir}#g' ${fetch_file}" [ $? == 0 ] && [OK] || [FAIL] chown -R $pg_sys_acct $postgress_install_dir chgrp -R $pg_sys_acct_group $postgress_install_dir popd >& /dev/null echo check_shmmax echo write_postgress_env write_postgress_install_log checked_done 0 } write_postgress_env() { ((show_summary_latch++)) echo "export PGHOME=$PGHOME" >> ${envfile} echo "export PGUSER=$PGUSER" >> ${envfile} echo "export PGHOST=$PGHOST" >> ${envfile} echo "export PGPORT=$PGPORT" >> ${envfile} echo "export PGBINDIR=$PGBINDIR" >> ${envfile} echo "export PGLIBDIR=$PGLIBDIR" >> ${envfile} prefix_to_path PATH ${postgress_bin_dir} >> ${envfile} prefix_to_path LD_LIBRARY_PATH ${postgress_lib_dir} >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_postgress_install_log() { echo "$(date ${date_format}) postgres=${postgress_version} ${postgress_install_dir}" >> ${install_manifest} dedup ${install_manifest} return 0 } #returns 1 if it is already running (if check_postgress_process returns 0 - true) start_postgress() { _is_managed_db && echo "Please be sure external database is running at this point..." && return 0 check_postgress_process && return 1 echo "Starting Postgress..." /etc/init.d/postgresql start check_shmmax sleep 3 /bin/ps -elf | grep postgres | grep -v grep checked_done 0 } stop_postgress() { _is_managed_db && echo "Please be sure external database is NOT running at this point..." && return 0 check_postgress_process [ $? != 0 ] && return 1 /etc/init.d/postgresql stop check_shmmax /bin/ps -elf | grep postgres | grep -v grep return 0 } test_postgress() { echo echo "----------------------------" echo "Postgress Test... " echo "----------------------------" echo ${postgress_bin_dir}/psql --version [ $? != 0 ] && echo" ERROR: Could NOT successfully locate postgres installation!!" && popd >& /dev/null && checked_done 1 start_postgress local ret=$(PGPASSWORD=${PGPASSWORD:-${pg_sys_acct_passwd}} psql -qt -c "select table_name from information_schema.tables;" postgres ${postgress_user} | grep -v ^$ | wc -l) ((ret == 0)) && echo " ERROR: Could not verify database installation! (perhaps \"pg_sys_acct_passwd\" was not set correctly?)" && checked_done 1 [OK] checked_done 0 } #Needed for UV-CDAT build... setup_cmake() { echo -n "Checking for CMake >= ${cmake_min_version} " check_version_with cmake "cmake --version | awk '{print \$3}' | sed -re 's/([^-]*)-.*/\1/'" ${cmake_min_version} ${cmake_max_version} [ $? == 0 ] && (( ! force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up CMake ${cmake_version}" echo "*******************************" echo local default="Y" ((force_install)) && default="N" local dosetup if [ -x ${cmake_install_dir}/bin/cmake ]; then echo "Detected an existing CMAKE installation..." 
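    #(added note): as in the other setup_* functions, the interactive default below flips with --force:
    #a plain run defaults the answer to "Y" (proceed with setup), while force_install makes the default "N".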
read -e -p "Do you want to continue with CMAKE installation and setup? $([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup [ -z ${dosetup} ] && dosetup=${default} if [ "${dosetup}" != "Y" ] && [ "${dosetup}" != "y" ]; then echo "Skipping CMAKE installation and setup - will assume CMAKE is setup properly" return 0 fi echo fi #make top level directory for cmake repo clone mkdir -p ${cmake_workdir%/*} chmod a+rw ${cmake_workdir%/*} if [ ! -d "${cmake_workdir}" ]; then echo "Cloning CMake repository ${cmake_repo}..." git clone ${cmake_repo} ${cmake_workdir} [ $? != 0 ] && echo "ERROR: Could not clone CMake repository" && checked_done 1 fi ( unset LD_LIBRARY_PATH unset CFLAGS unset LDFLAGS ((DEBUG)) && printf "\n-----\n cd ${cmake_workdir} \n-----\n" cd ${cmake_workdir} ((DEBUG)) && printf "\n-----\n git checkout v${cmake_version} \n-----\n" git checkout v${cmake_version} [ $? != 0 ] && echo "ERROR: Could not checkout CMake @ v${cmake_version}" && checked_done 2 ((DEBUG)) && printf "\n-----\n ./configure --parallel=${num_cpus} --prefix=${cmake_install_dir} \n-----\n" ./configure --parallel=${num_cpus} --prefix=${cmake_install_dir} [ $? != 0 ] && echo "ERROR: Could not configure CMake successfully" && checked_done 3 ((DEBUG)) && printf "\n-----\n make -j ${num_cpus} \n-----\n" make -j ${num_cpus} [ $? != 0 ] && echo "ERROR: Could not make CMake successfully" && checked_done 4 ((DEBUG)) && printf "\n-----\n make install \n-----\n" make install [ $? != 0 ] && echo "ERROR: Could not install CMake successfully" && checked_done 5 ) echo "returning from build subshell with code: [$?]" (( $? > 1 )) && echo "ERROR: Could not setup CMake successfully aborting... " && checked_done 1 cmake_version=$(${cmake_install_dir}/bin/cmake --version | awk '{print $3}' | sed -re 's/([^-]*)-.*/\1/') printf "\ninstalled CMake version = ${cmake_version}\n\n" write_cmake_env write_cmake_install_log checked_done 0 } write_cmake_env() { ((show_summary_latch++)) echo "export CMAKE_HOME=${cmake_install_dir}" >> ${envfile} prefix_to_path PATH ${cmake_install_dir}/bin >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_cmake_install_log() { echo "$(date ${date_format}) cmake=${cmake_version} ${cmake_install_dir}" >> ${install_manifest} dedup ${install_manifest} return 0 } ##### # CDAT = Python+CDMS ##### setup_cdat() { echo -n "Checking for *UV* CDAT (Python+CDMS) ${cdat_version} " #----------------------------------------------------- cdat_home=$(perl -pe 's/(? /dev/null local ret=$? ((ret == 0)) && (( ! force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up CDAT - (Python + CDMS)... ${cdat_version}" echo "*******************************" echo local dosetup="N" # TODO - remove old installation # upgrate existing installation if [ -x ${cdat_home}/bin/python ]; then echo "Detected an existing CDAT installation..." read -e -p "Do you want to continue with CDAT installation and setup? [y/N] " dosetup if [ "${dosetup}" != "Y" ] && [ "${dosetup}" != "y" ]; then echo "Skipping CDAT installation and setup - will assume CDAT is setup properly" return 0 fi echo fi mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null #--------- # Under FORCE (--force) semantics we clean everything out: the cloned repo and build dir #((force_install)) && [ -d ${workdir}/uvcdat ] && \rm -rf ${workdir}/uvcdat && \rm -rf ${workdir}/uvcdat_build #--------- # TODO check for conda if [ ! 
-f Miniconda2-latest-Linux-x86_64.sh ] ; then wget --no-check-certificate https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh [ $? != 0 ] && printf "$([FAIL]) \n\tCould not fetch miniconda setup\n\n" && checked_done 1 fi bash Miniconda2-latest-Linux-x86_64.sh -b -p $cdat_home [ $? != 0 ] && printf "$([FAIL]) \n\tError in executing Miniconda setup\n\n" && checked_done 1 export PATH=${cdat_home}/bin:$PATH if [ $ESGF_INSECURE > 0 ] ; then conda config --set ssl_verify False # binstar config --set verify_ssl False fi # create a default environment for the publisher with cduitl (and cdms2) if [ ! -d ${cdat_home}/envs/esgf-pub ] ; then unset LD_LIBRARY_PATH conda create -y -n esgf-pub -c conda-forge -c uvcdat cdutil [ $? != 0 ] && printf "$([FAIL]) \n\tCould not install or update uvcdat via conda\n\n" && checked_done 1 fi source activate esgf-pub python -c "import cdms2" 2>/dev/null [ $? != 0 ] && echo " ERROR: Could not load CDMS (cdms2) module" && popd && checked_done 1 source deactivate popd >& /dev/null echo #NOTE: order is important in next three calls... # _housekeeping_cdat_to_uvcdat write_cdat_env #---------------- # write_cdat_install_log checked_done 0 } write_cdat_env() { ((show_summary_latch++)) echo "export CDAT_HOME=${cdat_home}" >> ${envfile} echo "export UVCDAT_ANONYMOUS_LOG=False" >> ${envfile} prefix_to_path PATH ${cdat_home}/bin >> ${envfile} prefix_to_path LD_LIBRARY_PATH ${cdat_home}/Externals/lib >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_cdat_install_log() { echo "$(date ${date_format}) uvcdat=${cdat_version} ${cdat_home}" >> ${install_manifest} #Parse the cdat installation config.log file and entries to the install log local build_log=${workdir}/uvcdat_build/build_info.txt if [ -e "${build_log}" ]; then awk '{print "'"$(date ${date_format})"' uvcdat->"$1"="$2" '"${cdat_home}"'"}' ${build_log} | sed '$d' >> ${install_manifest} else echo " WARNING: Could not find cdat build logfile [${build_log}], installation log entries could not be generated!" fi dedup ${install_manifest} return 0 } test_cdat() { echo -n "Checking for cdms2 module... 
" python -c "import cdms2" >& /dev/null && [OK] || [FAIL] } #------------------------------------------------------------ #Special function for the upgrade scenario (cdat -> uvcdat) #------------------------------------------------------------# _housekeeping_cdat_to_uvcdat() { #Determine the install directory for cdat from the install_manifest #(NOTE: Whatever the resulting path is, it must contain the string "cdat") local cdat_installed_dir=$(sed -nre 's/.*[^v]cdat=([^ ]*)([^ ]*.*cdat)/\2/p' ${install_manifest} | tr -d " ") [[ ${cdat_installed_dir} ]] && backup ${cdat_installed_dir} && rm -rf ${cdat_installed_dir} #Remove all cdat install manifest entries from install manifest (NOTE: Not touching new uvcdat entries) sed -ire '/[^v]cdat/d' ${install_manifest} #reset library path to point to new uvcdat location myLD_LIBRARY_PATH=$(perl -pe 's/(?& /dev/null echo "$(pwd)" unset LD_LIBRARY_PATH source ${cdat_home}/bin/activate esgf-pub conda install -y -c conda-forge lxml requests psycopg2 decorator Tempita myproxyclient udunits2 esgf-pyclient cython if [ $ESGF_INSECURE > 0 ] ; then pipcmd="pip install --index-url=http://pypi.python.org/simple --trusted-host pypi.python.org" else pipcmd="pip install" fi $pipcmd esgprep==${esgprep_version} $pipcmd SQLAlchemy==0.7.10 $pipcmd sqlalchemy_migrate $pipcmd cdf2cim #clone publisher local publisher_git_protocol="git://" #--------- #Under FORCE (--force) semantics we clean everything out: the cloned repo and build dir ((force_install)) && [ -d ${workdir}/esg-publisher ] && \rm -rf ${workdir}/esg-publisher #--------- if [ ! -d ${workdir}/esg-publisher ]; then echo "Fetching the cdat project from GIT Repo..." ((DEBUG)) && echo "${publisher_repo}" git clone ${publisher_repo} esg-publisher if [ ! -d ${workdir}/esg-publisher/.git ]; then publisher_git_protocol="https://" echo "Apparently was not able to fetch from GIT repo using git protocol... trying https protocol..." ((DEBUG)) && echo "${publisher_repo_https}" git clone ${publisher_repo_https} esg-publisher [ ! -d ${workdir}/esg-publisher/.git ] && echo "Could not fetch from cdat's repo (with git nor https protocol)" && checked_done 1 fi fi cd esg-publisher >& /dev/null git pull git checkout ${publisher_tag} [ $? != 0 ] && echo " WARNING: Problem with checking out publisher (esgcet) revision [${esgcet_version}] from repository :-(" #install publisher ( cd src/python/esgcet python setup.py install ) [ $? 
!= 0 ] && checked_done 1 if [ "${mode}" = "I" ]; then local choice while [ 1 ]; do unset choice printf "Would you like a \"system\" or \"user\" publisher configuration: \n" printf "\t-------------------------------------------\n" printf "\t*[1] : System\n" printf "\t [2] : User\n" printf "\t-------------------------------------------\n" printf "\t [C] : (Custom)\n" printf "\t-------------------------------------------\n" read -e -p "select [1] > " choice [ -z "${choice}" ] && choice=1 #default case ${choice} in 2) publisher_home=${HOME}/.esgcet ;; 1) publisher_home=${esg_config_dir}/esgcet ;; c | C) local input read -e -p "Please enter the desired publisher configuration directory [${publisher_home}] " input [ -n "${input}" ] && publisher_home=${input} unset input read -e -p "Please enter the desired publisher configuration filename [${publisher_config}] " input [ -n "${input}" ] && publisher_config=${input} unset input choice="(Manual Entry)" ;; *) echo "Invalid selection [${choice}]" esac echo echo "You have selected: ${choice}" echo "Publisher configuration file -> [${publisher_home}/${publisher_config}]" echo local is_correct read -e -p "Is this correct? [Y/n] " is_correct is_correct=$(echo ${is_correct} | tr 'A-Z' 'a-z') if [ "${is_correct}" = "n" ]; then continue else break fi done unset choice export ESGINI=${publisher_home}/${publisher_config} echo "Your publisher configuration file will be: ${publisher_home}/${publisher_config}" local input="" read -e -p "What is your organization's id? [${esg_root_id}]: " input [ ! -z "${input}" ] && esg_root_id=${input} echo "esgsetup --config $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --rootid ${esg_root_id}" mkdir -p ${publisher_home} ESGINI=${publisher_home}/${publisher_config} esgsetup --config $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --rootid ${esg_root_id} sed -i s/"host\.sample\.gov"/${esgf_host}/g ${publisher_home}/${publisher_config} sed -i s/"LASatYourHost"/LASat${node_short_name}/g ${publisher_home}/${publisher_config} [ $? != 0 ] && popd && checked_done 1 fi echo "chown -R ${installer_uid}:${installer_gid} ${publisher_home}" chown -R ${installer_uid}:${installer_gid} ${publisher_home} [ $? != 0 ] && echo "**WARNING**: Could not change owner successfully - this will lead to inability to use the publisher properly!" #Let's make sure the group is there before we attempt to assign a file to it.... if [ ! $(getent group ${tomcat_group}) ]; then /usr/sbin/groupadd -r ${tomcat_group} [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: *Could not add tomcat system group: ${tomcat_group}" && popd && checked_done 1 fi chgrp ${tomcat_group} ${publisher_home}/${publisher_config} && chmod 640 ${publisher_home}/${publisher_config} && chmod 700 ${HOME} [ $? != 0 ] && echo "**WARNING**: Could not change group successfully - this will lead to inability to use the publisher properly!" 
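    #(added note, hedged summary of the block below): on a fresh install ("I" mode) postgres is
    #started and esgsetup is invoked with --db to create/initialize the node database; the
    #generated command takes the general shape
    #  ESGINI=<ini> esgsetup [--minimal-setup] --db --db-name <db> --db-admin <admin> \
    #      --db-user <user> --db-host <host> --db-port <port>
    #with admin/user passwords supplied via --db-admin-password / --db-user-password
    #(echoed as ****** unless DEBUG is set).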
start_postgress if [ "${mode}" = "I" ]; then if ((DEBUG)); then echo "ESGINI=${publisher_home}/${publisher_config} $cdat_home/bin/esgsetup $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --db $( [ -n "${db_database}" ] && echo "--db-name ${db_database}" ) $( [ -n "${postgress_user}" ] && echo "--db-admin ${postgress_user}" ) $([ -n "${pg_sys_acct_passwd:=${security_admin_password}}" ] && echo "--db-admin-password ${pg_sys_acct_passwd}") $( [ -n "${publisher_db_user}" ] && echo "--db-user ${publisher_db_user}" ) $([ -n "${publisher_db_user_passwd}" ] && echo "--db-user-password ${publisher_db_user_passwd}") $( [ -n "${postgress_host}" ] && echo "--db-host ${postgress_host}" ) $( [ -n "${postgress_port}" ] && echo "--db-port ${postgress_port}" )" else echo "ESGINI=${publisher_home}/${publisher_config} $cdat_home/bin/esgsetup $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --db $( [ -n "${db_database}" ] && echo "--db-name ${db_database}" ) $( [ -n "${postgress_user}" ] && echo "--db-admin ${postgress_user}" ) $([ -n "${pg_sys_acct_passwd:=${security_admin_password}}" ] && echo "--db-admin-password ******") $( [ -n "${publisher_db_user}" ] && echo "--db-user ${publisher_db_user}" ) $([ -n "${publisher_db_user_passwd}" ] && echo "--db-user-password ******") $( [ -n "${postgress_host}" ] && echo "--db-host ${postgress_host}" ) $( [ -n "${postgress_port}" ] && echo "--db-port ${postgress_port}" )" fi ESGINI=${publisher_home}/${publisher_config} esgsetup $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --db $( [ -n "${db_database}" ] && echo "--db-name ${db_database}" ) $( [ -n "${postgress_user}" ] && echo "--db-admin ${postgress_user}" ) $([ -n "${pg_sys_acct_passwd:=${security_admin_password}}" ] && echo "--db-admin-password ${pg_sys_acct_passwd}") $( [ -n "${publisher_db_user}" ] && echo "--db-user ${publisher_db_user}" ) $([ -n "${publisher_db_user_passwd}" ] && echo "--db-user-password ${publisher_db_user_passwd}") $( [ -n "${postgress_host}" ] && echo "--db-host ${postgress_host}" ) $( [ -n "${postgress_port}" ] && echo "--db-port ${postgress_port}" ) [ $? != 0 ] && popd && checked_done 1 fi unset choice has_cmor=`conda list | grep cmor | awk '{print $1}'` if [ ! -z $has_cmor ] ; then echo found cmor installation for PrePARE - upgrading to specified version ${cmor_version} conda install -y -c conda-forge -c pcmdi -c uvcdat cmor==${cmor_version} fi if [ ! -d /usr/local/cmip6-cmor-tables ] ; then read -e -p "Would you like to configure this node for CMIP6 publishing (additional project dependencies will be installed)? [y/N] " choice choice=$(echo ${choice} | tr 'A-Z' 'a-z') if [ -z $choice ] ; then choice="n" fi if [ $choice == "y" ] ; then conda install -y -c conda-forge -c pcmdi -c uvcdat cmor==${cmor_version} pip install esgfpid pushd /usr/local git clone https://github.com/PCMDI/cmip6-cmor-tables esgf_pub_group="esgfpub" if [ ! $(getent group ${esgf_pub_group}) ]; then /usr/sbin/groupadd -r ${esgf_pub_group} [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: Could not add esgf_pub system group: ${esgf_pub_roup}" && popd && checked_done 1 fi chgrp -R ${esgf_pub_group} cmip6-cmor-tables chmod -R g+rwx cmip6-cmor-tables popd fi fi echo "esginitialize -c" esginitialize -c [ $? 
!= 0 ] && popd && checked_done 1 popd >& /dev/null echo echo write_esgcet_env write_esgcet_install_log checked_done 0 source deactivate } write_esgcet_env() { echo "export ESG_ROOT_ID=$esg_root_id" >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_esgcet_install_log() { echo "$(date ${date_format}) python:esgcet=${esgcet_version}" >> ${install_manifest} dedup ${install_manifest} write_as_property publisher_config write_as_property publisher_home write_as_property monitor.esg.ini ${publisher_home}/${publisher_config} return 0 } test_esgcet() { echo echo "----------------------------" echo "ESGCET Test... " echo "----------------------------" echo pushd $workdir >& /dev/null start_postgress #esgcet_testdir=$(readlink -f ${thredds_root_dir})/test esgcet_testdir=${thredds_root_dir}/test mkdir -p ${esgcet_testdir} [ $? != 0 ] && checked_done 1 chown ${installer_uid}:${installer_gid} ${esgcet_testdir} >& /dev/null mkdir -p ${thredds_replica_dir} [ $? != 0 ] && checked_done 1 chown ${installer_uid}:${installer_gid} ${thredds_replica_dir} >& /dev/null echo "esgcet test directory: [${esgcet_testdir}]" local fetch_file fetch_file=sftlf.nc checked_get ${esgcet_testdir}/${fetch_file} ${esg_dist_url_root}/externals/${fetch_file} $((force_install)) (( $? > 1 )) && echo " ERROR: Problem pulling down ${fetch_file} from esg distribution" && popd && checked_done 1 source ${cdat_home}/bin/activate esgf-pub #Run test... echo "$cdat_home/bin/esginitialize -c " esginitialize -c echo "$cdat_home/bin/esgscan_directory --dataset pcmdi.${esg_root_id}.${node_short_name}.test.mytest --project test ${esgcet_testdir} > mytest.txt" esgscan_directory --dataset pcmdi.${esg_root_id}.${node_short_name}.test.mytest --project test ${esgcet_testdir} > mytest.txt [ $? != 0 ] && echo " ERROR: ESG directory scan failed" && popd && checked_done 1 echo "$cdat_home/bin/esgpublish --service fileservice --map mytest.txt --project test --model test" esgpublish --service fileservice --map mytest.txt --project test --model test [ $? != 0 ] && echo " ERROR: ESG publish failed" && popd && checked_done 1 popd >& /dev/null echo echo checked_done 0 } esgcet_startup_hook() { echo -n "ESGCET (Publisher) Startup Hook: Setting perms... " [ ! -e ${publisher_home}/${publisher_config} ] && echo_fail" Could not find publisher configuration file [${publisher_home}/${publisher_config}] :-(" && return 1 chgrp ${tomcat_group} ${publisher_home}/${publisher_config} && chmod 644 ${publisher_home}/${publisher_config} && chmod 700 ${HOME} && echo_ok ":-)" [ $? != 0 ] && echo_fail ":-/" && return 2 } ##### # Apache Tomcat # arg 1 -> The password for the current keystore ##### setup_tomcat() { echo -n "Checking for tomcat >= ${tomcat_min_version} " check_app_version ${tomcat_install_dir} ${tomcat_min_version} local ret=$? ((ret == 0)) && (( ! force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up Apache Tomcat...(v${tomcat_version})" echo "*******************************" echo local upgrade=${1:-0} local last_install=$(readlink -f ${tomcat_install_dir}) local default="Y" ((force_install)) && default="N" local dosetup if [ -x ${tomcat_install_dir}/bin/jsvc ]; then echo "Detected an existing tomcat installation..." read -e -p "Do you want to continue with Tomcat installation and setup? 
$([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup [ -z "${dosetup}" ] && dosetup=${default} if [ "${dosetup}" != "Y" ] && [ "${dosetup}" != "y" ]; then echo "Skipping tomcat installation and setup - will assume tomcat is setup properly" return 0 fi echo fi mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null local tomcat_dist_file=${tomcat_dist_url##*/} #strip off .tar.gz at the end tomcat_dist_dir=$(echo ${tomcat_dist_file} | awk 'gsub(/('$compress_extensions')/,"")') #There is this pesky case of having a zero sized dist file... if [ -e ${tomcat_dist_file} ]; then ls -l ${tomcat_dist_file} local size=$(stat -c%s ${tomcat_dist_file}) (( size == 0 )) && rm -v ${tomcat_dist_file} fi #Check to see if we have a tomcat distribution directory if [ ! -e ${tomcat_install_dir%/*}/${tomcat_dist_dir} ]; then echo "Don't see tomcat distribution dir ${tomcat_install_dir%/*}/${tomcat_dist_dir}" if [ ! -e ${tomcat_dist_file} ]; then echo "Don't see tomcat distribution file $(pwd)/${tomcat_dist_file} either" echo "Downloading Tomcat from ${tomcat_dist_url}" wget -O ${tomcat_dist_file} ${tomcat_dist_url} [ $? != 0 ] && echo " ERROR: Could not download Tomcat" && popd && checked_done 1 echo "unpacking ${tomcat_dist_file}..." tar xzf ${tomcat_dist_file} -C ${tomcat_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Tomcat" && popd && checked_done 1 fi fi #If you don't see the directory but see the tar.gz distribution #then expand it if [ -e ${tomcat_dist_file} ] && [ ! -e ${tomcat_install_dir%/*}/${tomcat_dist_dir} ]; then echo "unpacking ${tomcat_dist_file}..." tar xzf ${tomcat_dist_file} -C ${tomcat_install_dir%/*} # i.e. /usr/local [ $? != 0 ] && echo " ERROR: Could not extract Tomcat..." && popd && checked_done 1 fi if [ ! -e ${tomcat_install_dir} ]; then (cd ${tomcat_install_dir%/*} && ln -s ${tomcat_dist_dir} ${tomcat_install_dir}) [ $? != 0 ] && \ echo " ERROR: Could not create sym link ${tomcat_install_dir%/*}/${tomcat_dist_dir} -> ${tomcat_install_dir}" && popd && checked_done 1 else unlink ${tomcat_install_dir} >& /dev/null [ $? != 0 ] && mv ${tomcat_install_dir} ${tomcat_install_dir}.$(date ${date_format}).bak (cd ${tomcat_install_dir%/*} && ln -s ${tomcat_dist_dir} ${tomcat_install_dir}) [ $? != 0 ] && \ echo " ERROR: Could not create sym link ${tomcat_install_dir%/*}/${tomcat_dist_dir} -> ${tomcat_install_dir}" && popd && checked_done 1 fi #If there is no tomcat user on the system create one (double check that usradd does the right thing) id $tomcat_user if [ $? != 0 ]; then echo " WARNING: There is no tomcat user \"$tomcat_user\" present on system" #NOTE: "useradd/groupadd" are a RedHat/CentOS thing... to make this cross distro compatible clean this up. if [ ! $(getent group ${tomcat_group}) ]; then /usr/sbin/groupadd -r ${tomcat_group} [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: Could not add tomcat system group: ${tomcat_group}" && popd && checked_done 1 fi echo "/usr/sbin/useradd -r -c"Tomcat Server Identity" -g $tomcat_group $tomcat_user" /usr/sbin/useradd -r -c"Tomcat Server Identity" -g $tomcat_group $tomcat_user [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: Could not add tomcat system account user \"$tomcat_user\"" && popd && checked_done 1 fi cd $tomcat_install_dir #---------- #build jsvc (if necessary) #---------- echo -n "Checking for jsvc... 
" ( pushd ./bin >& /dev/null #https://issues.apache.org/jira/browse/DAEMON-246 LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib${word_size} if [ -e ./jsvc ] && [ -x ./jsvc ]; then [OK] else echo "[NOT PRESENT]" stop_tomcat echo "Building jsvc... (JAVA_HOME=$java_install_dir)" if [ -e commons-daemon-native.tar.gz ]; then echo "unpacking commons-daemon-native.tar.gz..." tar xzf commons-daemon-native.tar.gz cd commons-daemon-*-native-src/unix #It turns out they shipped with a conflicting .o file in there (oops) so I have to remove it manually. rm -f ./native/libservice.a make clean elif [ -e jsvc.tar.gz ]; then echo "unpacking jsvc.tar.gz..." tar xzf jsvc.tar.gz cd jsvc-src autoconf else echo "NOT ABLE TO INSTALL JSVC!" && checked_done 1 fi chmod 755 ./configure ./configure --with-java=${java_install_dir} make -j ${num_cpus} [ -x ./jsvc ] && cp ./jsvc ${tomcat_install_dir}/bin fi [ ! -e /usr/lib/libcap.so ] && [ -e /lib${word_size}/libcap.so ] && ln -s /lib${word_size}/libcap.so /usr/lib/libcap.so >& /dev/null popd >& /dev/null ) #---------- #---------------------------------- # Upgrade logic... #---------------------------------- if ((upgrade)) ; then stop_tomcat echo "Upgrading tomcat installation from $(readlink -f ${last_install} | sed -ne 's/.*-\(.*\)$/\1/p') to $(readlink -f ${tomcat_install_dir} | sed -ne 's/.*-\(.*\)$/\1/p')" echo -n "copying webapps... " cp -R ${last_install}/webapps ${tomcat_install_dir}/ [ $? == 0 ] && [OK] || [FAIL] echo -n "copying configuration... " cp -R ${last_install}/conf ${tomcat_install_dir}/ [ $? == 0 ] && [OK] || [FAIL] echo -n "copying logs... " cp -R ${last_install}/logs ${tomcat_install_dir}/ [ $? == 0 ] && [OK] || [FAIL] echo "upgrade migration complete" else if [ -s "$ks_secret_file" ]; then configure_tomcat ${keystore_password:-$(cat ${ks_secret_file} 2> /dev/null)} else configure_tomcat ${keystore_password:-${security_admin_password}} fi fi #---------------------------------- chown -R $tomcat_user $(readlink -f ${tomcat_install_dir}) [ $? != 0 ] && " ERROR: Could not change ownership of tomcat to \"$tomcat_user\" user" && popd && checked_done 1 chgrp -R $tomcat_group $(readlink -f ${tomcat_install_dir}) [ $? != 0 ] && " ERROR: Could not change group of tomcat to \"$tomcat_user\" user" && popd && checked_done 1 #------------------------------- # For Security Reasons... #------------------------------- ( cd ${tomcat_install_dir}/webapps || return 1 echo "Checking for unnecessary webapps with dubious security implications as a precaution..." superfluous_dirs=(examples docs host-manager manager) for superfluous_dir in ${superfluous_dirs[@]}; do [ ! -e "${superfluous_dir}" ] && continue local superfluous_dir=$(readlink -f ${superfluous_dir}) echo -n "Removing ${superfluous_dir} ... " rm -rf ${superfluous_dir} && [OK] || [FAIL] done ) setup_root_app checked_get ${tomcat_install_dir}/webapps/ROOT/robots.txt ${esg_dist_url}/robots.txt $((force_install)) checked_get ${tomcat_install_dir}/webapps/ROOT/favicon.ico ${esg_dist_url}/favicon.ico $((force_install)) migrate_tomcat_credentials_to_esgf sleep 2 start_tomcat tomcat_port_check && echo "Tomcat ports checkout $([OK])" || ([FAIL] && popd && checked_done 1) echo popd >& /dev/null echo echo write_tomcat_env write_tomcat_install_log return 0 } #If there is no logrotate file ${tomcat_logrotate_file} then create one #default is to cut files after 512M up to 20 times (10G of logs) #No file older than year should be kept. setup_tomcat_logrotate() { [ ! 
-e /usr/sbin/logrotate ] && echo "Not able to find logrotate here [/usr/local/logrotate] $([FAIL])" && return 1 #TODO: Make these vars properties that can be set by user. # Check property values against values in the logrotate file values to know when to rewrite. local log_rot_size="512M" local log_rot_num_files="20" local tomcat_logrotate_file="/etc/logrotate.d/esgf_tomcat" local tomcat_install_dir=${tomcat_install_dir:="/usr/local/tomcat"} [ ! -e "${tomcat_install_dir}/logs" ] && echo "Sorry, could not find tomcat log dir [${tomcat_install_dir}/logs] $([FAIL])" && return 2 if [ ! -e "${tomcat_logrotate_file}" ] || ((force_install)); then echo "Installing tomcat log rotation... [${tomcat_logrotate_file}]" cat >> ${tomcat_logrotate_file} <etc/httpd/conf/esgf-httpd.conf; sed -i '/\#permitted-ips-start-here/,/\#permitted-ips-end-here/d' etc/httpd/conf/esgf-httpd.conf; sed -i "s/\(.*\)LoadModule wsgi_module $quotedwsgipath\(.*\)/\1LoadModule wsgi_module placeholder_so\2/" etc/httpd/conf/esgf-httpd.conf; incsfile=`echo Include /etc/httpd/conf/esgf-httpd-locals.conf|sed 's/[./*?|]/\\\\&/g'`; incfile=`echo Include /etc/httpd/conf/esgf-httpd-local.conf|sed 's/[./*?|]/\\\\&/g'`; uncommentedincfile=0 uncommentedincsfile=0 if ! grep -w 'Include /etc/httpd/conf/esgf-httpd-locals.conf' etc/httpd/conf/esgf-httpd.conf|grep '#' >/dev/null; then uncommentedincsfile=1; sed -i "s/$incsfile/\#$incsfile/" etc/httpd/conf/esgf-httpd.conf; fi if ! grep -w 'Include /etc/httpd/conf/esgf-httpd-local.conf' etc/httpd/conf/esgf-httpd.conf|grep '#' >/dev/null; then uncommentedincfile=1; sed -i "s/$incfile/\#$incfile/" etc/httpd/conf/esgf-httpd.conf; fi head -22 etc/httpd/conf/esgf-httpd.conf >etc/httpd/conf/origsrvlines head -22 etc/httpd/conf/esgf-httpd.conf.tmpl >etc/httpd/conf/defaultsrvlines sed -ie '1,22d' etc/httpd/conf/esgf-httpd.conf sed -ie '1,22d' etc/httpd/conf/esgf-httpd.conf.tmpl if ! diff etc/httpd/conf/esgf-httpd.conf.tmpl etc/httpd/conf/esgf-httpd.conf >/dev/null; then #we have changes; add allowed ips, ext file selection and wsgi path to latest template and apply debug_print "Detected changes. Will update and reapply customizations. 
An esg-node restart would be needed to read in the changes."; cat etc/httpd/conf/esgf-httpd.conf.tmpl >>etc/httpd/conf/origsrvlines && mv etc/httpd/conf/origsrvlines etc/httpd/conf/esgf-httpd.conf.tmpl cp etc/httpd/conf/esgf-httpd.conf.tmpl etc/httpd/conf/esgf-httpd.conf sed -i "s/\(.*\)LoadModule wsgi_module placeholder_so\(.*\)/\1LoadModule wsgi_module $quotedwsgipath\2/" etc/httpd/conf/esgf-httpd.conf; sed -i "s/\#insert-permitted-ips-here/\#permitted-ips-start-here\n$allowedips\n\t#permitted-ips-end-here/" etc/httpd/conf/esgf-httpd.conf; if [ $uncommentedincfile -eq 1 ]; then sed -i "s/\#$incfile/$incfile/" etc/httpd/conf/esgf-httpd.conf; fi if [ $uncommentedincsfile -eq 1 ]; then sed -i "s/\#$incsfile/$incsfile/" etc/httpd/conf/esgf-httpd.conf; fi cp /etc/httpd/conf/esgf-httpd.conf /etc/httpd/conf/esgf-httpd.conf.bck; cp etc/httpd/conf/esgf-httpd.conf /etc/httpd/conf/esgf-httpd.conf; else debug_print "No changes detected in apache frontend conf."; fi popd; popd; } setup_sensible_confs() { #place for post-install configuration overrides, binary replacements etc #quick-fix for removing insecure commons-fileupload jar file if [ -s /usr/local/solr/server/solr-webapp/webapp/WEB-INF/lib/commons-fileupload-1.2.1.jar ]; then rm -f /usr/local/solr/server/solr-webapp/webapp/WEB-INF/lib/commons-fileupload-1.2.1.jar; cp ${tomcat_install_dir}/webapps/esg-search/WEB-INF/lib/commons-fileupload-1.3.1.jar /usr/local/solr/server/solr-webapp/webapp/WEB-INF/lib/; fi #configuration overrides tmpservername='placeholder.fqdn'; quotedtmpservername=`echo "$tmpservername" | sed 's/[./*?|]/\\\\&/g'`; servername=$esgf_host; quotedservername=`echo "$servername" | sed 's/[./*?|]/\\\\&/g'`; sconffiles="esgf_ats.xml.tmpl esgf_azs.xml.tmpl esgf_idp.xml.tmpl"; for i in `echo $sconffiles`; do cksum=`curl -s --insecure ${esg_dist_url_root}/confs/$i.md5|awk '{print $1}'`; ccksum=`curl -s --insecure ${esg_dist_url_root}/confs/$i|md5sum|awk '{print $1}'`; if [ "$cksum" != "$ccksum" ]; then echo "Checksum did not tally for file $i. Giving up."; continue; fi if echo $i|grep tmpl >/dev/null; then fn=`echo $i|sed "s/\(.*\).tmpl/\1/"`; curl -s --insecure ${esg_dist_url_root}/confs/$i|sed "s/\(.*\)$quotedtmpservername\(.*\)/\1$quotedservername\2/" >$esg_config_dir/$fn; else fn=$i; curl -s --insecure ${esg_dist_url_root}/confs/$i >$esg_config_dir/$fn; fi chown apache:apache $esg_config_dir/$fn; chmod a+r $esg_config_dir/$fn; done chmod a+r $esg_config_dir/esgf_idp_static.xml } setup_apache_frontend() { olddir=`pwd`; localworkdir=${ESGF_INSTALL_WORKDIR:-"${installer_home}/workbench/esg"}; cd $localworkdir; mkdir apache_frontend; cd apache_frontend; git clone ${apache_frontend_repo} if [ $? 
-eq 0 ]; then cd apache-frontend; if [ $devel -eq 1 ]; then git checkout $apache_frontend_tag; else git checkout master; fi hn=${esgf_host:-`hostname -f`} echo -e "$hn\ny" >answers /etc/init.d/httpd stop >/dev/null; #/usr/sbin/apachectl stop > /dev/null; chkconfig --levels 2345 httpd off bash copyfiles.sh ${tomcat_install_dir}/conf >/etc/certs/esgf-ca-bundle.crt; /etc/init.d/esgf-httpd start #/usr/sbin/apachectl -f /etc/httpd/conf/esgf-httpd.conf -k start else export LD_LIBRARY_PATH=/opt/esgf/python/lib:/opt/esgf/python/lib/python2.7:/opt/esgf/python/lib/python2.7/site-packages/mod_wsgi/server /opt/esgf/python/bin/pip install --upgrade pip /opt/esgf/python/bin/pip install --upgrade virtualenv unset LD_LIBRARY_PATH conf_file=/etc/httpd/conf/esgf-httpd.conf if [ -f $conf_file ] ; then esgf_httpd_ver=`grep ESGF-HTTPD-CONF $conf_file | awk '{print $4}'` if [ -z $esgf_httpd_ver ] ; then echo "esgf-httpd.conf is missing versioning, attempting to update." update_apache_conf else check_version_atleast $esgf_httpd_ver $apache_frontend_version if [ $? == 0 ] ; then echo "esgf-httpd.conf version is sufficient" else echo "esgf-httpd version is out-of-date, attempting to update." update_apache_conf fi fi else echo "esgf-httpd.conf file not found, attempting to update. This condition is not expected to occur and should be reported to ESGF support" update_apache_conf fi fi } #helper function to poke at tomcat ports... tomcat_port_check() { #--- #port testing for http and https #--- local ret_all=0 local protocol="http" echo "checking connection at all ports described in ${tomcat_install_dir}/conf/server.xml" while read port key value; do if [ "$port" = "8223" ]; then continue; fi if [ "$port" = "8443" ]; then protocol="https" fi echo -n "checking localhost port [${port}]" local wait_time=5 local ret=1 while [[ $wait_time > 0 ]]; do curl -k ${protocol}://localhost:${port} >& /dev/null ret=$? [ $ret == 0 ] && break sleep 1 : $((wait_time--)) echo -n "." done [ $ret == 0 ] && [OK] || [FAIL] #We only care about reporting a failure for ports below 1024 #specifically 80 (http) and 443 (https) [ "${key}" = "protocol" ] && [ -n "$(echo ${value} | grep -i http)" ] && esgf_http_port=${port} [ "${key}" = "SSLEnabled" ] && esgf_https_port=${port} (($port < 1024)) && ((ret_all+=ret)) done < <(echo "$(sed -n 's/.*Connector.*port="\([0-9]*\)"[ ]*\([^ ]*\)="\([^ ]*\)"/\1 \2 \3/p' ${tomcat_install_dir}/conf/server.xml)") #--- return ${ret_all} } write_tomcat_env() { ((show_summary_latch++)) echo "export CATALINA_HOME=${CATALINA_HOME}" >> ${envfile} prefix_to_path PATH ${tomcat_install_dir}/bin >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_tomcat_install_log() { echo "$(date ${date_format}) tomcat=${tomcat_version} $(readlink -f ${tomcat_install_dir})" >> ${install_manifest} dedup ${install_manifest} write_as_property tomcat_install_dir write_as_property esgf_http_port 80 write_as_property esgf_https_port 443 return 0 } migrate_tomcat_credentials_to_esgf() { #Move selected config files into esgf tomcat's config dir (certificate et al) #Ex: /esg/config/tomcat #-rw-r--r-- 1 tomcat tomcat 181779 Apr 22 19:44 esg-truststore.ts #-r-------- 1 tomcat tomcat 887 Apr 22 19:32 hostkey.pem #-rw-r--r-- 1 tomcat tomcat 1276 Apr 22 19:32 keystore-tomcat #-rw-r--r-- 1 tomcat tomcat 590 Apr 22 19:32 pcmdi11.llnl.gov-esg-node.csr #-rw-r--r-- 1 tomcat tomcat 733 Apr 22 19:32 pcmdi11.llnl.gov-esg-node.pem #-rw-r--r-- 1 tomcat tomcat 295 Apr 22 19:42 tomcat-users.xml #Only called when migration conditions are present. 
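    #(added note): each credential below is migrated with the same guarded, idempotent pattern -
    #move only when the file still lives in tomcat's conf dir and is not already present under
    #${tomcat_conf_dir}; schematically (src/dst are placeholders, not installer variables):
    #  [ -e "${src}" ] && [ ! -e "${dst}" ] && mv -v "${src}" "${dst}"
    #so re-running the installer never clobbers credentials that were migrated earlier.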
if [ "${tomcat_install_dir}/conf" != "${tomcat_conf_dir}" ]; then [ ! -e ${tomcat_conf_dir} ] && mkdir -p ${tomcat_conf_dir} backup ${tomcat_install_dir}/conf ((DEBUG)) && echo "Moving credential files into node's tomcat configuration dir: ${tomcat_conf_dir}" [ -e ${tomcat_install_dir}/conf/${truststore_file##*/} ] && [ ! -e ${truststore_file} ] && mv -v ${tomcat_install_dir}/conf/${truststore_file##*/} ${truststore_file} && echo -n "+" [ -e ${tomcat_install_dir}/conf/${keystore_file##*/} ] && [ ! -e ${keystore_file} ] && mv -v ${tomcat_install_dir}/conf/${keystore_file##*/} ${keystore_file} && echo -n "+" [ -e ${tomcat_install_dir}/conf/${tomcat_users_file##*/} ] && [ ! -e ${tomcat_users_file} ] && mv -v ${tomcat_install_dir}/conf/${tomcat_users_file##*/} ${tomcat_users_file} && echo -n "+" [ -e ${tomcat_install_dir}/conf/hostkey.pem ] && [ ! -e ${tomcat_conf_dir}/hostkey.pem ] && mv -v ${tomcat_install_dir}/conf/hostkey.pem ${tomcat_conf_dir}/ && echo -n "+" [ -e ${tomcat_install_dir}/conf/${esgf_host:-$(hostname --fqdn)}-esg-node.csr ] && [ ! -e ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.csr ] && mv -v ${tomcat_install_dir}/conf/${esgf_host:-$(hostname --fqdn)}-esg-node.csr ${tomcat_conf_dir}/ && echo -n "+" [ -e ${tomcat_install_dir}/conf/${esgf_host:-$(hostname --fqdn)}-esg-node.pem ] && [ ! -e ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem ] && mv -v ${tomcat_install_dir}/conf/${esgf_host:-$(hostname --fqdn)}-esg-node.pem ${tomcat_conf_dir}/ && echo -n "+" chown -R ${tomcat_user}.${tomcat_group} ${tomcat_conf_dir} echo #Be sure that the server.xml file contains the explicit Realm specification needed. if ! egrep -q ' 1 )) && popd && checked_done 1 chmod 600 ${tomcat_install_dir}/conf/server.xml chown ${tomcat_user}.${tomcat_group} ${tomcat_install_dir}/conf/server.xml local ks_secret=$(cat ${ks_secret_file} 2> /dev/null) fi if ! egrep -q ' 1 )) && popd && checked_done 1 chmod 600 ${tomcat_install_dir}/conf/server.xml chown ${tomcat_user}.${tomcat_group} ${tomcat_install_dir}/conf/server.xml local ks_secret=$(cat ${ks_secret_file} 2> /dev/null) fi #SET the server.xml variables to contain proper values ((DEBUG)) && echo "Editing ${tomcat_install_dir}/conf/server.xml accordingly..." [[ ${tomcat_users_file} ]] && eval "perl -p -i -e 's#(?<=pathname=)\"([^\"]*)\"#\"${tomcat_users_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*" [[ ${truststore_file} ]] && eval "perl -p -i -e 's#(?<=truststoreFile=)\"([^\"]*)\"#\"${truststore_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*" [[ ${truststore_password} ]] && eval "perl -p -i -e 's#(?<=truststorePass=)\"([^\"]*)\"#\"${truststore_password}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*" [[ ${keystore_file} ]] && eval "perl -p -i -e 's#(?<=keystoreFile=)\"([^\"]*)\"#\"${keystore_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*" [[ ${keystore_password:-${ks_secret}} ]] && eval "perl -p -i -e 's#(?<=keystorePass=)\"([^\"]*)\"#\"${keystore_password:-${ks_secret}}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*" [[ ${keystore_alias} ]] && eval "perl -p -i -e 's#(?<=keyAlias=)\"([^\"]*)\"#\"${keystore_alias}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo "*" fi } #Utility function to check that a given password is valid for the global scoped ${keystore_file} _check_keystore_password() { local store_password=${1} [ ! 
-e ${keystore_file} ] && echo "$([FAIL]) No keystore file present [${keystore_file}]" && return 1 $java_install_dir/bin/keytool -list -keystore ${keystore_file} -storepass ${store_password} >& /dev/null [ $? != 0 ] && echo "$([FAIL]) Could not access private keystore ${keystore_file_} with provided password. Try again..." && return 2 return 0 } write_ca_ans_templ(){ cat <setupca.ans.tmpl write_reqhost.ans_templ >reqhost.ans.tmpl echo -e "y\ny" >setuphost.ans cat setupca.ans.tmpl|sed "s/placeholder.fqdn/$hostname/" >setupca.ans cat reqhost.ans.tmpl|sed "s/placeholder.fqdn/$hostname/" >reqhost.ans curl -s -L --insecure ${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "")/esgf-installer/CA.pl >CA.pl curl -s -L --insecure ${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "")/esgf-installer/openssl.cnf >openssl.cnf curl -s -L --insecure ${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "")/esgf-installer/myproxy-server.config >myproxy-server.config perl CA.pl -newca cacert.pem cp CA/private/cakey.pem cakey.pem openssl x509 -in newcert.pem -inform pem -outform pem >hostcert.pem mv newkey.pem hostkey.pem chmod 400 cakey.pem chmod 400 hostkey.pem rm -f new*.pem ESGF_OPENSSL=/usr/bin/openssl cert=cacert.pem tmpsubj='/O=ESGF/OU=ESGF.ORG/CN=placeholder' quotedtmpsubj=`echo "$tmpsubj" | sed 's/[./*?|]/\\\\&/g'`; certsubj=`openssl x509 -in $cert -noout -subject|cut -d ' ' -f2-`; quotedcertsubj=`echo "$certsubj" | sed 's/[./*?|]/\\\\&/g'`; echo "quotedcertsubj=~$quotedcertsubj~"; localhash=`$ESGF_OPENSSL x509 -in $cert -noout -hash`; tgtdir="globus_simple_ca_${localhash}_setup-0"; mkdir $tgtdir; cp $cert $tgtdir/${localhash}.0; print_templ >signing_policy_template; sed "s/\(.*\)$quotedtmpsubj\(.*\)/\1$quotedcertsubj\2/" signing_policy_template >$tgtdir/${localhash}.signing_policy; cp $tgtdir/${localhash}.signing_policy signing-policy tar -cvzf globus_simple_ca_${localhash}_setup-0.tar.gz $tgtdir; rm -rf $tgtdir; rm -f signing_policy_template; mkdir -p /etc/certs cp openssl.cnf /etc/certs/ cp host*.pem /etc/certs/ cp cacert.pem /etc/certs/cachain.pem mkdir -p /etc/esgfcerts } configure_tomcat() { #---------------------------- # TOMCAT Configuration... #---------------------------- echo echo "*******************************" echo "Configuring Tomcat... (for Node Manager)" echo "*******************************" echo setup_tomcat_logrotate pushd ${tomcat_install_dir}/conf #>& /dev/null local fetch_file local genreq=N fetch_file=server.xml checked_get ${tomcat_install_dir}/conf/${fetch_file} ${esg_dist_url}/externals/bootstrap/node.${fetch_file}-v${tomcat_version%%.*} $((force_install)) (( $? > 1 )) && popd && checked_done 1 chmod 600 ${tomcat_install_dir}/conf/${fetch_file} chown ${tomcat_user}.${tomcat_group} ${tomcat_install_dir}/conf/${fetch_file} echo "Looking for keystore [${keystore_file}]... $([ -e ${keystore_file} ] && echo "(found it)" || echo "(don't see one)... ")" #Create a keystore in $tomcat_conf_dir echo "Keystore setup: " local store_password=${1:-$(cat ${ks_secret_file} 2> /dev/null)} if [ ! 
-e ${keystore_file} ]; then echo "Launching Java's keytool:" if [ -z "${store_password}" ]; then local verify_password while [ 1 ]; do echo read -e -s -p "Please enter the password for this keystore : " store_password [ "${store_password}" = "changeit" ] && break [ -z "${store_password}" ] && echo "Invalid password [${store_password}]" && continue echo read -e -s -p "Please re-enter the password for this keystore: " verify_password if [ "${store_password}" = "${verify_password}" ] ; then echo break else echo "Sorry, values did not match" echo fi done unset verify_password echo else echo "store_password = ******" fi #NOTE: #As Reference on Distingueshed Names (DNs) #http://download.oracle.com/javase/1.4.2/docs/tooldocs/windows/keytool.html #According to that document, case does not matter but ORDER DOES! #See script scope declaration of this variable (default_dname [suffix] = "OU=ESGF.ORG, O=ESGF") local input local use_dn="Y" read -e -p "Would you like to use the DN: (${dname:=${default_dname}}) ? [Y/n]: " input [ -n "${input}" ] && use_dn=$(echo ${input} | tr 'a-z' 'A-Z') unset input ((DEBUG)) && echo "Your selection is ${use_dn}" ((DEBUG)) && echo "dname = ${dname}" if [ -z "${dname}" ] || [ "${use_dn}" = "N" ]; then $java_install_dir/bin/keytool -genkey -alias ${keystore_alias} -keyalg RSA -keystore ${keystore_file} -validity 365 -storepass ${store_password} [ $? != 0 ] && echo " ERROR: keytool genkey command failed" && popd && checked_done 1 else [ -z $(echo $dname | sed -n 's#.*CN=\([^,]*\),.*#\1#p') ] && dname="CN=${esgf_host:-$(hostname --fqdn)}, ${dname}" echo "Using keystore DN = ${dname}" $java_install_dir/bin/keytool -genkey -dname "${dname}" -alias ${keystore_alias} -keyalg RSA -keystore ${keystore_file} -validity 365 -storepass ${store_password} -keypass ${store_password} [ $? != 0 ] && echo " ERROR*: keytool genkey command failed" && popd && checked_done 1 fi unset use_dn genreq="Y" else echo "Using existing keystore \"${keystore_file}\"" fi setup_temp_ca #Fetch/Copy truststore to $tomcat_conf_dir #(first try getting it from distribution server otherwise copy Java's) if [ ! -e ${truststore_file} ]; then fetch_file=${truststore_file##*/} # i.e. esg-truststore.ts if [ "$node_peer_group" = "esgf-test" ]; then checked_get ${truststore_file} ${esg_dist_url_root}/certs/test-federation/${fetch_file} $((force_install)) else checked_get ${truststore_file} ${esg_dist_url_root}/certs/${fetch_file} $((force_install)) fi if (( $? > 1 )); then echo " INFO: Could not download certificates ${fetch_file} for tomcat - will copy local java certificate file" echo "(note - the truststore password will probably not match!)" cp -v ${java_install_dir}/jre/lib/security/cacerts ${truststore_file} [ $? != 0 ] && echo " ERROR: Could not fetch or copy ${fetch_file} for tomcat!!" && popd && checked_done 1 fi fi #NOTE: The truststore uses the java default password: "changeit" #Edit the server.xml file to contain proper location of certificates ((DEBUG)) && echo "Editing ${tomcat_install_dir}/conf/server.xml accordingly..." 
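    #NOTE (illustrative): each substitution below uses a perl lookbehind so that only the quoted
    #attribute value in server.xml is rewritten; e.g. (with an example path, adjust to your config):
    #  perl -p -i -e 's#(?<=keystoreFile=)"([^"]*)"#"/esg/config/tomcat/keystore-tomcat"#g' server.xml
    #turns keystoreFile="OLD" into keystoreFile="/esg/config/tomcat/keystore-tomcat" and leaves
    #every other attribute in the file untouched.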
    [[ ${tomcat_users_file} ]] && eval "perl -p -i -e 's#(?<=pathname=)\"([^\"]*)\"#\"${tomcat_users_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*"
    [[ ${truststore_file} ]] && eval "perl -p -i -e 's#(?<=truststoreFile=)\"([^\"]*)\"#\"${truststore_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*"
    [[ ${truststore_password} ]] && eval "perl -p -i -e 's#(?<=truststorePass=)\"([^\"]*)\"#\"${truststore_password}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*"
    [[ ${keystore_file} ]] && eval "perl -p -i -e 's#(?<=keystoreFile=)\"([^\"]*)\"#\"${keystore_file}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*"
    [[ ${keystore_password:-${ks_secret}} ]] && eval "perl -p -i -e 's#(?<=keystorePass=)\"([^\"]*)\"#\"${keystore_password:-${ks_secret}}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo -n "*"
    [[ ${keystore_alias} ]] && eval "perl -p -i -e 's#(?<=keyAlias=)\"([^\"]*)\"#\"${keystore_alias}\"#g' ${tomcat_install_dir}/conf/server.xml"; echo "*"

    add_my_cert_to_truststore --keystore-pass ${store_password}

    #clean up the password immediately after it is done being used
    keystore_password=${store_password} #don't want to do this but it is used by the esg-security script that installs the ORP
    unset store_password

    chown -R $tomcat_user $(readlink -f ${tomcat_install_dir})
    [ $? != 0 ] && echo " ERROR: Could not change ownership of tomcat to \"$tomcat_user\" user" && popd && checked_done 1
    chgrp -R $tomcat_group $(readlink -f ${tomcat_install_dir})
    [ $? != 0 ] && echo " ERROR: Could not change group of tomcat to \"$tomcat_group\" group" && popd && checked_done 1
    chown -R $tomcat_user $(readlink -f ${tomcat_conf_dir})
    [ $? != 0 ] && echo " ERROR: Could not change ownership of esg's tomcat config dir to \"$tomcat_user\" user" && popd && checked_done 1
    chgrp -R $tomcat_group $(readlink -f ${tomcat_conf_dir})
    [ $? != 0 ] && echo " ERROR: Could not change group of esg's tomcat config dir to \"$tomcat_group\" group" && popd && checked_done 1

    popd #>& /dev/null
}

generate_esgf_csrs(){
    [ -z "${esgf_host}" ] && get_property esgf_host
    if [ $((sel & INDEX_BIT)) != 0 ]; then
        openssl req -new -nodes -config /etc/certs/openssl.cnf -keyout /etc/esgfcerts/cakey.pem -out /etc/esgfcerts/cacert_req.csr \
            -subj "/O=ESGF/OU=ESGF.ORG/CN=$esgf_host-CA"
        echo "Successfully generated request for a simpleCA CA certificate: /etc/esgfcerts/cacert_req.csr"
    fi
    echo "You are strongly advised to obtain and install commercial CA issued certificates for the web container.";
    openssl req -new -nodes -config /etc/certs/openssl.cnf -keyout /etc/esgfcerts/hostkey.pem -out /etc/esgfcerts/hostcert_req.csr \
        -subj "/O=ESGF/OU=ESGF.ORG/CN=$esgf_host"
    echo "Please mail the csr files for signing to Lukasz Lacinski, Prashanth Dwarakanath, or Sébastien Denvil";
    echo "When you receive the signed certificate pack, untar all files into /etc/esgfcerts and execute esg-node --install-local-certs";
    echo "If you also want to install the local certs for the tomcat web-container, execute esg-node --install-keypair /etc/esgfcerts/hostcert.pem /etc/esgfcerts/hostkey.pem";
    echo "When prompted for the cachain file, specify /etc/esgfcerts/cachain.pem";
}

generate_esgf_csrs_ext(){
    echo "Are you requesting certs for an index-node or a datanode?
(index/data)?"; while [ 1 ]; do read nd_type; if [ "$nd_type" != 'index' -a "$nd_type" != 'data' ]; then echo "Please specify index or data as node type"; continue; else break; fi done echo "Enter FQDN of node you are requesting certificates for"; read req_node_hostname; mkdir -p /etc/extcsrs 2>/dev/null; to_tar='hostkey.pem hostcert_req.csr' if [ "$nd_type" = "index" ]; then to_tar=${to_tar}" cacert_req.csr cakey.pem"; openssl req -new -nodes -config /etc/certs/openssl.cnf -keyout /etc/extcsrs/cakey.pem -out /etc/extcsrs/cacert_req.csr \ -subj "/O=ESGF/OU=ESGF.ORG/CN=$req_node_hostname-CA" echo "Successfully generated request for a simpleCA CA certificate: /etc/extcsrs/cacert_req.csr" fi echo -e "You are strongly advised to obtain and install commercial CA issued certificates for the web container." openssl req -new -nodes -config /etc/certs/openssl.cnf -keyout /etc/extcsrs/hostkey.pem -out /etc/extcsrs/hostcert_req.csr \ -subj "/O=ESGF/OU=ESGF.ORG/CN=$req_node_hostname" #echo "to tar: $to_tar"; pushd /etc/extcsrs; tar -czf $req_node_hostname.tgz $to_tar; popd; echo "A copy of the generated keys and CSRs has been saved as /etc/extcsrs/$req_node_hostname.tgz"; echo "Please mail the csr files for signing to Prashanth Dwarakanath or Nicolas Carenton "; echo "When you receive the signed certificate pack, untar all files into /etc/esgfcerts and execute esg-node --install-local-certs"; echo "If you also want to install the local certs for the tomcat web-container, execute esg-node --install-keypair /etc/esgfcerts/hostcert.pem /etc/esgfcerts/hostkey.pem"; echo "When prompted for the cachain file, specify /etc/esgfcerts/cachain.pem"; } cert_howto(){ cat < When prompted for the cachain file, specify the chain file provided by your CA If you wish to generate CSRs for a simpleCA CA certificate and/or web container certificate: esg-node --generate-esgf-csrs If you wish to generate CSRs for a node other than the one you are running, you could use esg-node --generate-esgf-csrs-ext If you wish to install ESGF certificates: 1. untar the entire contents of the tarball you received from Nicolas/Prashanth into /etc/esgfcerts 2. esg-node --install-local-certs 3. If you also have locally issued certificates for the webcontainer: esg-node --install-keypair /etc/esgfcerts/hostcert.pem /etc/esgfcerts/hostkey.pem When prompted for the cachain file, specify /etc/esgfcerts/cachain.pem 4. If you have certificates for your webcontainer, issued by a commercial CA, ensure you have the following: a) Certificate and key files. b) CA chain file. Ensure that your CA chain file is complete with this command: openssl verify -verbose -purpose sslserver -CAfile You should simply get a one line response that looks like this: If you have errors, your chain file is not complete. Contact your certificate provider for assistance, or email esgf_iwt@llnl.gov with 'Help needed with CA chainfile construction' in the subject line, attaching your public certificate (NOT KEY!!!) and the CA's certificate or the chain file that you have. EOF } install_local_certs(){ if [ $upgrade_mode -eq 1 ]; then return; fi if [ "$1" = "firstrun" ]; then flist=("cakey.pem" "cacert.pem" "hostcert.pem" "hostkey.pem" "myproxy-server.config") certdir=/etc/tempcerts; else if [ $((sel & INDEX_BIT)) != 0 ]; then flist=("cakey.pem" "cacert.pem" "hostcert.pem" "hostkey.pem") else flist=("hostcert.pem" "hostkey.pem") fi certdir=/etc/esgfcerts; fi cd $certdir; missingfiles=0; for fl in ${flist[@]}; do if [ ! 
-s $fl ]; then missingfiles=1; echo "File $fl is not found in $certdir; Please place it there and reexecute esg-node --install-local-certs"; fi done if [ $((sel & INDEX_BIT)) != 0 ]; then cert=cacert.pem localhash=`openssl x509 -in $cert -noout -hash`; globuspack=globus_simple_ca_${localhash}_setup-0.tar.gz if [ ! -s $globuspack ]; then missingfiles=1; echo "File $globuspack is not found in $certdir; Please place it there and reexecute esg-node --install-local-certs"; fi fi if [ $missingfiles -eq 1 ]; then return; fi cd $certdir if [ $((sel & INDEX_BIT)) != 0 ]; then cp cacert.pem /var/lib/globus-connect-server/myproxy-ca/cacert.pem cp cakey.pem /var/lib/globus-connect-server/myproxy-ca/private/cakey.pem cp $globuspack /var/lib/globus-connect-server/myproxy-ca/ cp $globuspack /etc/grid-security/certificates/ fi if [ -s hostkey.pem -a -s hostcert.pem ]; then cp host*.pem /etc/grid-security/ fi echo "Local installation of certs complete."; } setup_root_app() { [ -d "${tomcat_install_dir}/webapps/ROOT" ] && $(grep -q REFRESH ${tomcat_install_dir}/webapps/ROOT/index.html >& /dev/null) && echo "ROOT app in place... $([OK])" && return 0 echo "Oops, Don't see ESGF ROOT web application" [ -d "${tomcat_install_dir}/webapps/ROOT" ] && backup ${tomcat_install_dir}/webapps/ROOT echo echo "*******************************" echo "Setting up Apache Tomcat...(v${tomcat_version}) ROOT webapp" echo "*******************************" echo local root_app_dist_url=${esg_dist_url}/ROOT.tgz mkdir -p ${workdir} pushd ${workdir} >& /dev/null echo "Downloading ROOT application from ${root_app_dist_url}" checked_get ${root_app_dist_url} (( $? > 1 )) && echo " ERROR: Could not download ROOT app archive" && popd && checked_done 1 echo "unpacking ${root_app_dist_url##*/}..." tar xzf ${root_app_dist_url##*/} -C ${tomcat_install_dir}/webapps [ $? != 0 ] && echo " ERROR: Could not extract $(readlink -f ${root_app_dist_url##*/})" && popd && checked_done 1 [ -e ${tomcat_install_dir}/webapps/esgf-node-manager ] && cp ${tomcat_install_dir}/webapps/ROOT/index.html{.nm,} [ -e ${tomcat_install_dir}/webapps/esgf-web-fe ] && cp ${tomcat_install_dir}/webapps/ROOT/index.html{.fe,} chown -R ${tomcat_user} $(readlink -f ${tomcat_install_dir}/webapps/ROOT) chgrp -R ${tomcat_group} $(readlink -f ${tomcat_install_dir}/webapps/ROOT) echo "ROOT application \"installed\"" popd >& /dev/null } #Util "private" function for use **AFTER** tomcat has been configured!!!! #Reads tomcat's server.xml file at sets the appropriate vars based on contained values #Will *only* set global vars if it was successfully gleaned from server.xml. _glean_keystore_info() { if [ -r "${tomcat_install_dir}/conf/server.xml" ]; then debug_print "inspecting tomcat config file " echo -n "O" local value="" value=$(sed -n 's#.*keystoreFile="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null | egrep -v '@@') && echo -n "o" || echo -n "x" [ $? == 0 ] && [ -n "${value}" ] && keystore_file=${value} debug_print "keystore_file=${value}" unset value value=$(sed -n 's#.*keystorePass="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null | egrep -v '@@') && echo -n "." || echo -n "x" [ $? == 0 ] && [ -n "${value}" ] && keystore_password=${value} debug_print "keystore_password=$(md5sum <(echo "${value}") | awk '{print $1}')" unset value value=$(sed -n 's#.*keyAlias="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null | egrep -v '@@') && echo -n ":" || echo -n "x" [ $? 
== 0 ] && [ -n "${value}" ] && keystore_alias=${value} debug_print "keystore_alias=${value}" unset value value=$(sed -n 's#.*truststoreFile="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null | egrep -v '@@') && echo -n "-" || echo -n "x" [ $? == 0 ] && [ -n "${value}" ] && truststore_file=${value} debug_print "truststore_file=${value}" unset value value=$(sed -n 's#.*truststorePass="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null | egrep -v '@@') && echo -n ")" || echo -n "x" [ $? == 0 ] && [ -n "${value}" ] && truststore_password=${value} debug_print "truststore_password=$(md5sum <(echo "${value}") | awk '{print $1}')" unset value echo else echo "Could not glean values store... :-(" return 1 fi return 0 } test_tomcat() { echo echo "----------------------------" echo "Tomcat Test... " echo "----------------------------" echo start_tomcat tomcat_port_check && echo "Tomcat ports checkout $([OK])" || ([FAIL] && popd && checked_done 1) checked_done 0 } setup_slcs(){ read -e -p "Would you like to install the SLCS OAuth server on this node? [y/N] " choice choice=$(echo ${choice} | tr 'A-Z' 'a-z') if [ -z $choice ] ; then choice="n" fi if [ $choice == "y" ] ; then yum -y install ansible start_postgress export PGPASSWORD=`cat /esg/config/.esg_pg_pass` && createdb -U dbsuper slcsdb pushd /usr/local/src git clone https://github.com/ESGF/esgf-slcs-server-playbook.git chown -R apache:apache esgf-slcs-server-playbook pushd esgf-slcs-server-playbook if [ $devel -eq 1 ]; then git checkout devel; fi chgrp -R apache /var/lib/globus-connect-server/myproxy-ca/ chmod g+rx /var/lib/globus-connect-server/myproxy-ca/ chmod g+rx /var/lib/globus-connect-server/myproxy-ca/private chmod g+r /var/lib/globus-connect-server/myproxy-ca/private/cakey.pem sed -i "s/test_server_name/$(hostname --fqdn)/" playbook/overrides/production_venv_only.yml get_property mail_admin_address sed -i "s/test_email/$mail_admin_address/" playbook/overrides/production_venv_only.yml sed -i "s/abc123/$(cat /esg/config/.esg_pg_pass)/g" playbook/overrides/production_venv_only.yml write_as_property short_lived_certificate_server $esgf_host mkdir -p /usr/local/esgf-slcs-server chown -R apache:apache /usr/local/esgf-slcs-server ansible-playbook -i playbook/inventories/localhost -e "@playbook/overrides/production_venv_only.yml" playbook/playbook.yml popd popd fi } #called during setup_tds or directly by --set-idp-peer | --set-admin-peer flags select_idp_peer() { #---- #(Making this assignment for the sake of readability for the code below) #@@node_host_ip_address@@ #Token in file [ -z "${esgf_host_ip}" ] && get_property esgf_host_ip #just to be sure it's clear (though it should be) unset input read -e -p "Please Enter the public (i.e. routable) IP address of this host [${esgf_host_ip}]:> " input [ ! 
-z "${input}" ] && esgf_host_ip=${input} printf "\nUsing IP: ${esgf_host_ip}\n" unset input local default_myproxy_port=7512 local custom_myproxy_port="" #@@esgf_idp_peer_name@@ -> @@esgf_idp_peer@@ #Tokens in file #ESGF-PCMDI -> pcmdi3.llnl.gov/esgcet if [ $((sel & INDEX_BIT)) != 0 ]; then read -e -p "Do you wish to use an external IDP peer?(N/y):" input if [ "$input" = '' -o "$input" = 'n' -o "$input" = 'N' ]; then input='N'; esgf_idp_peer=$esgf_host esgf_idp_peer_name=`echo $esgf_idp_peer| tr '[a-z]' '[A-Z]'`; fi else input='y' fi if [ "$input" = "y" -o "$input" = "Y" ]; then read -e -p "Please specify your IDP peer node's FQDN:" input esgf_idp_peer_name=`echo $input| tr '[a-z]' '[A-Z]'`; esgf_idp_peer=$input fi myproxy_endpoint=$(echo ${esgf_idp_peer} | sed 's{\(.*://\)*\([^/]*\)[/]*.*{\2{') echo echo "Selection: [${choice}] source: ${esgf_host_ip} dest: ${esgf_idp_peer_name}:${esgf_idp_peer}" if [ "${esgf_host}" != "${esgf_idp_peer}" ]; then if ! check_for_group_intersection_with ${esgf_idp_peer} ; then printf " ---------------------------------------------------------------------- The IDP selected must share at least one of the peer group(s) [${node_peer_group}] that this node is a member of! run: esg-node --federation-sanity-check ${esgf_idp_peer} for confirmation. ---------------------------------------------------------------------- " local answer local default_answer="N" #always be safe. #read -e -p "Do you still wish to continue? [y/N] " answer #[ -n "${answer}" ] && answer=$(echo ${answer} | tr 'a-z' 'A-Z') || answer=${default_answer} #[ "${answer}" = "N" ] && exit 1 fi fi #---------------------- #Fetch and Insert the Certificate of IDP Peer (which is always co-located with myproxy - at least at present) #If you want to register to yourself... you already have your own certs trusted, no need to register #(this is what the call to add_my_cert_to_truststore does, called previously in configure_tomcat) if [ "${esgf_host}" != "${myproxy_endpoint}" ]; then register ${myproxy_endpoint} [ $? != 0 ] && echo " Error: could not import IDP Peer's certificate!" && checked_done 1 else #Unless you are going to force it, there is an assumption here that your truststore already has your cert installed, from when you configured tomcat if ((force_install)) ; then add_my_cert_to_truststore && echo ":-)" || echo ":-(" fi fi write_as_property esgf_idp_peer_name write_as_property esgf_idp_peer #---- write_as_property myproxy_endpoint #this is used to set the registration.xml's "adminPeer" value (revisit this.. I should be more consistent with naming) write_as_property myproxy_port ${custom_myproxy_port:-${default_myproxy_port}} #---------------------- write_tds_env } #TODO - currently an orphaned function... make this a part of a larger configuration sanity check routine... sanity_check_web_xmls() { #---- #Editing web.xml files for projects who use the authorizationService #---- echo "sanity checking webapps' web.xml files accordingly... " echo " |--setting ownership of web.xml files... to ${tomcat_user}.${tomcat_group}" find ${tomcat_install_dir}/webapps | egrep -e '/web.xml$' | xargs chown ${tomcat_user}.${tomcat_group} echo " |--inspecting web.xml files for proper authorization service assignment... 
" local web_xml_files=($(find ${tomcat_install_dir}/webapps | egrep -e '/web.xml$' | xargs grep /saml/soap/secure/authorizationService.htm | awk '{print $1}' | sort -u | sed 's@:@@')) sed -i.bak 's@\(https://\)[^/]*\(/esg-orp/saml/soap/secure/authorizationService.htm[,]*\)@\1'${esgf_host}'\2@g' ${web_xml_files[@]} #-----check that all trustoreFile params are set appropriately------- local param=trustoreFile local pname=${truststore_file} sed -i.bak '/[ ]*'${param}'[ ]*/,/<\/param-value>/ s#\(\)[ ]*[^<]*[ ]*\(\)#\1'${pname}'\2#' ${web_xml_files[@]} #-------------------------------------------------------------------- local instruct_to_reboot=0 local ret=0; for((i=0;i<${#web_xml_files[@]};i++)); do ((DEBUG)) && echo -n " inspecting $(readlink -f ${web_xml_files[i]}) " diff ${web_xml_files[${i}]}{,.bak} >& /dev/null ret=$? ((instruct_to_reboot+=$ret)) ((ret > 0)) && echo -n "-" || echo -n "*" ((DEBUG)) && echo done ((instruct_to_reboot)) && \ printf " ------------------------------------------------------------------------------------------------- webapp web.xml files have been modified - you must restart node stack for changes to be in effect (esg-node restart) ------------------------------------------------------------------------------------------------- " || echo } ##### # THREDDS Data Server ##### get_webxml_file() { #Get the templated web.xml file... (with tokens for subsequent filter entries: see [esg-]security-[token|tokenless]-filters[.xml] files) fetch_file=web.xml # 3rd and 4th args mean we force download and keep a backup of the file. checked_get ${tomcat_install_dir}/webapps/thredds/WEB-INF/${fetch_file}.tmpl ${esg_dist_url}/thredds/thredds.${fetch_file} 1 1 local ret=$? (( ret > 1 )) && popd && checked_done 1 if (( ret == 0 )); then debug_print "new template file detected: swapping files..." cp -vf ${tomcat_install_dir}/webapps/thredds/WEB-INF/${fetch_file}{,.bak} cp -vf ${tomcat_install_dir}/webapps/thredds/WEB-INF/${fetch_file}{.tmpl,} fi chown -R ${tomcat_user} ${tomcat_install_dir}/webapps/thredds/WEB-INF/${fetch_file}{,.tmpl} chgrp -R ${tomcat_group} ${tomcat_install_dir}/webapps/thredds/WEB-INF/${fetch_file}{,.tmpl} } setup_tds() { echo -n "Checking for thredds (tds) >= ${tds_min_version} " check_webapp_version "thredds" ${tds_min_version} "Implementation-Version" local ret=$? # ((ret == 0)) && (( ! force_install )) && [OK] && return 0 echo echo "*******************************" echo "Setting up Thredds Data Server... v${tds_version}" echo "*******************************" echo local upgrade=${1:-0} local default="Y" local dosetup if [ -d ${tomcat_install_dir}/webapps/thredds ]; then echo "Detected an existing thredds installation..." read -e -p "Do you want to continue with thredds installation and setup? [Y/n] " dosetup [ -z "${dosetup}" ] && dosetup=${default} if [ "${dosetup}" == "N" ] || [ "${dosetup}" == "n" ]; then echo "Skipping thredds installation and setup - will assume thredds is setup properly" return 0 fi echo fi mkdir -p ${workdir} [ $? != 0 ] && return 1 pushd ${workdir} >& /dev/null local fetch_file ############################ #Download the thredds.war file home site (or use value of thredds_dist_file if already set) ############################ thredds_dist_file=${thredds_dist_file:-${thredds_dist_url##*/}} #There is this pesky case of having a zero sized dist file... 
if [ -e ${thredds_dist_file} ]; then ls -l ${thredds_dist_file} local size=$(stat -c%s ${thredds_dist_file} >& /dev/null) (( size == 0 )) && rm -v ${thredds_dist_file} fi #Check to see if we have this war file already in the workbench... #if [ ! -e ${thredds_dist_file} ]; then # wget -O ${thredds_dist_file} ${thredds_dist_url} # if [ $? != 0 ]; then # echo " ERROR: Could not download ${thredds_dist_url},... fetching the copy at PCMDI (LLNL)..." checked_get ${thredds_dist_file} ${thredds_esg_dist_url} $((force_install)) (( $? > 1 )) && echo " ERROR: Could not download ${thredds_esg_dist_url} either" && popd && checked_done 1 # fi #fi stop_tomcat #------------------------------------------- #installing the thredds web app ("manually") //zoiks echo "Installing thredds app..." mkdir -p ${tomcat_install_dir}/webapps pwd cp -v $(readlink -f ${thredds_dist_file}) ${tomcat_install_dir}/webapps/ pushd ${tomcat_install_dir}/webapps [ $? != 0 ] && echo " Error: could not go to ${tomcat_install_dir}/webapps directory be sure tomcat is installed!" && checked_done 1 mkdir -p thredds cd thredds #move the current web.xml file out of the way local webapp_config_file=${tomcat_install_dir}/webapps/thredds/WEB-INF/web.xml if ((upgrade)) && [ -e ${webapp_config_file} ]; then cp -v ${webapp_config_file} ${webapp_config_file}.saved fi jar xf ../${thredds_dist_file} cd .. chown -R ${tomcat_user} thredds* chgrp -R ${tomcat_group} thredds* rm ${thredds_dist_file} if ((upgrade)) ; then local version_property="Implementation-Version" local current_version=$(sed -n '/^'${version_property}':[ ]*\(.*\)/p' ${tomcat_install_dir}/webapps/thredds/META-INF/MANIFEST.MF | awk '{print $2}' | xargs 2> /dev/null) cp -v ${webapp_config_file}.saved ${webapp_config_file} && echo "Thredds upgraded to ${current_version} $([OK])" && popd >& /dev/null && return 0 fi popd >& /dev/null #------------------------------------------- ############################ #Setup Digest authentication ############################ pushd ${tomcat_conf_dir} >& /dev/null fetch_file=tomcat-users.xml checked_get $tomcat_conf_dir/${fetch_file} ${esg_dist_url}/externals/bootstrap/${fetch_file} $((force_install)) (( $? > 1 )) && popd && checked_done 1 chown ${tomcat_user} $tomcat_conf_dir/${fetch_file} chgrp ${tomcat_group} $tomcat_conf_dir/${fetch_file} #1: Generate password hash printf "Create user credentials\n" local input while [ 1 ]; do #default credential values... local username="dnode_user" local password=${security_admin_password:-"changeme"} unset input unset addanother read -e -p "Please enter username for tomcat [${username}]: " input [ ! -z "${input}" ] && username=${input} echo ${username} unset input read -e -s -t60 -p "Please enter password for user, \"${username}\" [********]: " input [ ! -z "${input}" ] && password=${input} password_hash=$($tomcat_install_dir/bin/digest.sh -a SHA ${password} | cut -d ":" -f 2) echo ${password_hash} #Escaping $ signs between password hash and salt in tomcat 8 password_hash=$(sed 's/\$/\\$/g' <<< $password_hash) #Create user entry in tomcat-users.xml for thredds user user_entry="" #Note: Have to escape the last "/" in "/>" #Insert the entry in the right place in tomcat-users.xml #Replace with ${user_entry}\n #Command Line:% perl -p -i -e 's//\n /g' tomcat-users.xml eval "perl -p -i -e 's//${user_entry}\n /g' tomcat-users.xml" read -e -p "Would you like to add another user? 
[y/N]: " addanother if [ "${addanother}" = "y" ] || [ "${addanother}" = "Y" ]; then echo continue fi echo break done unset input popd >& /dev/null ############################ #Enable SSL encryption ############################ mkdir -p ${tomcat_conf_dir}/Catalina/localhost fetch_file=thredds.xml checked_get $tomcat_conf_dir/Catalina/localhost/${fetch_file} ${esg_dist_url}/externals/bootstrap/tomcat-${fetch_file} $((force_install)) (( $? > 1 )) && echo " ERROR: Problem pulling down ${fetch_file} from esg distribution" && popd && checked_done 1 # Getting TDS web.xml template file. get_webxml_file select_idp_peer #See thredds_content_dir set in init() - default value is ${esg_root_dir}/content echo "Please set the thredds content directory to: ${thredds_content_dir} in the following setup" #-------- #-------- #HACK ALERT!! For some reason the public directory does not respect thredds' tds.context.root.path property... # So have to manually move over this directory to avert server not starting! -gavin local created_brand_new_thredds_content_dir=0 if [ ! -d ${thredds_content_dir}/thredds ] || ((force_install)); then mkdir -p ${thredds_content_dir}/thredds cp -R ${tomcat_install_dir}/webapps/thredds/WEB-INF/altContent/startup/public ${thredds_content_dir}/thredds chown -R ${tomcat_user} ${thredds_content_dir} chmod -R 755 ${thredds_content_dir} created_brand_new_thredds_content_dir=1 fi #-------- #Getting ESGF specific appplicationContext.xml file checked_get ${tomcat_install_dir}/webapps/thredds/WEB-INF/applicationContext.xml ${esg_dist_url}/thredds/applicationContext.xml #Getting jars needed by authorizationFilter, authenticationFilter and authenticationByIPFilter since TDS5 migration checked_get ${tomcat_install_dir}/webapps/thredds/WEB-INF/lib/jdom-legacy-1.1.3.jar ${esg_dist_url}/filters/jdom-legacy-1.1.3.jar checked_get ${tomcat_install_dir}/webapps/thredds/WEB-INF/lib/commons-httpclient-3.1.jar ${esg_dist_url}/filters/commons-httpclient-3.1.jar checked_get ${tomcat_install_dir}/webapps/thredds/WEB-INF/lib/commons-lang-2.6.jar ${esg_dist_url}/filters/commons-lang-2.6.jar #Here we want to make sure that there is the file - Ex: /esg/content/thredds/threddsConfig.xml exists. #If not go get it... (needed, since if the file is not there then re-init will not work - apparently) local thredds_config_file_template_url=${esg_dist_url}/thredds/threddsConfig.xml.tmpl local thredds_config_file_template=${thredds_config_file_template_url##*/} local thredds_config_file=${thredds_config_file_template%.*} if [ ! -e ${thredds_config_file} ]; then mkdir -p ${thredds_content_dir}/thredds checked_get ${thredds_content_dir}/thredds/${thredds_config_file} ${thredds_config_file_template_url} $((force_install)) fi get_property mail_admin_address sed -i "s/support@my.group/$mail_admin_address/g" ${thredds_content_dir}/thredds/threddsConfig.xml #restart tomcat to put modifications in effect. 
stop_tomcat start_tomcat start_postgress #set in cdms setup (prerequisite) echo "$cdat_home/bin/esgsetup $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --thredds --publish --gateway ${myproxy_endpoint} $([ -n "${security_admin_password}" ] && echo "--thredds-password ********")" source ${cdat_home}/bin/activate esgf-pub ESGINI=${publisher_home}/${publisher_config} esgsetup $( ((${recommended} == 1 )) && echo "--minimal-setup" ) --thredds --publish --gateway ${myproxy_endpoint} $([ -n "${security_admin_password}" ] && echo "--thredds-password ${security_admin_password}") ((created_brand_new_thredds_content_dir)) && chown -R ${tomcat_user}:${tomcat_group} ${thredds_content_dir} popd >& /dev/null echo mkdir -p ${thredds_root_dir} [ $? != 0 ] && echo " Error: could not create ${thredds_root_dir}" && checked_done 1 chown ${installer_uid}:${installer_gid} ${thredds_root_dir} >& /dev/null mkdir -p ${thredds_replica_dir} [ $? != 0 ] && echo " Error: could not create ${thredds_replica_dir}" && checked_done 1 chown ${installer_uid}:${installer_gid} ${thredds_replica_dir} >& /dev/null echo #Set ownership of esgcet "thredds content" directory to installation owner if [ ! -d ${thredds_content_dir}/thredds/esgcet ] || ((force_install)); then mkdir -p ${thredds_content_dir}/thredds/esgcet echo "chown -R ${installer_uid}:${installer_gid} ${thredds_content_dir}/thredds/esgcet" chown -R ${installer_uid}:${installer_gid} ${thredds_content_dir}/thredds/esgcet [ $? != 0 ] && echo "WARNING: Could not change owner successfully - this will lead to unability to create new catalogs via the publisher!" fi echo "curl http://localhost/thredds" local wait_time=5 local ret=1 while [[ $wait_time > 0 ]]; do curl http://localhost/thredds >& /dev/null ret=$? [ $ret == 0 ] && break sleep 1 : $((wait_time--)) done #append_esgf_log4j_entries ${tomcat_install_dir}/webapps/thredds/WEB-INF/log4j.xml [ $? != 0 ] && [FAIL] && echo " ERROR: Not able to contact thredds page on this server" && checked_done 1 [OK] write_tds_env write_tds_install_log source deactivate checked_done 0 } write_tds_env() { ((show_summary_latch++)) echo "export ESGF_IDP_PEER_NAME=${esgf_idp_peer_name}" >> ${envfile} echo "export ESGF_IDP_PEER=${esgf_idp_peer}" >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } write_tds_install_log() { echo "$(date ${date_format}) webapp:thredds=${tds_version} ${tomcat_install_dir}/webapps/${thredds_dist_file%.*}" >> ${install_manifest} write_as_property thredds_service_endpoint "http://${esgf_host}/${thredds_dist_file%.*}" write_as_property thredds_service_app_home ${tomcat_install_dir}/webapps/${thredds_dist_file%.*} dedup ${install_manifest} return 0 } test_tds() { echo echo "----------------------------" echo "Thredds Data Server Test... (publisher catalog gen)" echo "----------------------------" echo mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null start_tomcat start_postgress echo "$cdat_home/bin/esgpublish --service fileservice --use-existing pcmdi.${esg_root_id}.${node_short_name}.test.mytest --noscan --thredds" $cdat_home/bin/esgpublish --service fileservice --use-existing pcmdi.${esg_root_id}.${node_short_name}.test.mytest --noscan --thredds [ $? != 0 ] && [FAIL] && echo " ERROR: Not able to run esgpublish command" && popd && checked_done 1 sleep 2 echo "curl http://${esgf_host_ip}/thredds" curl http://${esgf_host_ip}/thredds [ $? 
!= 0 ] && [FAIL] && echo " ERROR: Not able to contact thredds page on this server" && popd && checked_done 1 [OK] popd >& /dev/null echo echo checked_done 0 } tds_startup_hook() { echo -n "TDS (THREDDS) Startup Hook: Setting perms... " chown -R ${tomcat_user} ${thredds_content_dir} [ $? != 0 ] && echo_fail ":-(" && return 1 echo_ok ":-)" } ##### # Globus Toolkit -> MyProxy (client) & GridFTP (server) ##### # Takes arg # The rest of the args are the following... # for data-node configuration (GridFTP stuff): ["bdm"|"end-user"] see esg-globus script # for idp configuration (MyProxy stuff): [gen-self-cert] | [fetch-certs|gen-self-cert|keep-certs] | ["install"|"update"] my_setup_globus() { local sel=${1:-${sel:-0}} debug_print "my_setup_globus for sel type ${sel}" ((sel == 0)) && echo "my_setup_globus: no selection set, returning (1)" && return 1 shift mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null pushd ${scripts_dir} >& /dev/null local fetch_file=esg-globus verbose_print "checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install))" checked_get ./${fetch_file} ${esg_dist_url}/externals/bootstrap/${fetch_file} $((force_install)) (( $? > 1 )) && popd && return 1 chmod 755 ${fetch_file} popd >& /dev/null local directive="notype" local ret=1 if [ $((sel & DATA_BIT)) != 0 ]; then echo -n "Globus Setup for Data-Node... (GridFTP server) " directive="datanode" pushd ${workdir} >& /dev/null (source ${scripts_dir}/${fetch_file} && setup_globus_services "${directive}" $@) ret=$? popd >& /dev/null [ ${ret} = 0 ] && write_globus_env || checked_done 1 touch ${globus_location}/esg_${progname}_installed fi ret=1 if [ $((sel & IDP_BIT)) != 0 ]; then echo -n "Globus Setup for Index-Node... (MyProxy server) " directive="gateway" pushd ${workdir} >& /dev/null local setup_mode="update" [ $((sel & INSTALL_BIT)) != 0 ] && setup_mode="install" (source ${scripts_dir}/${fetch_file} && setup_globus_services "${directive}" $@ "${setup_mode}") ret=$? popd >& /dev/null [ ${ret} = 0 ] && write_globus_env || checked_done 1 touch ${globus_location}/esg_${progname}_installed fi return 0 } write_globus_env() { ((show_summary_latch++)) echo "export GLOBUS_LOCATION=$GLOBUS_LOCATION" >> ${envfile} dedup ${envfile} && source ${envfile} return 0 } test_globus() { local sel=${1:-${sel:-0}} ((sel ==0)) && echo "test_globus: no selection set, returning (1)" && return 1 shift local ret=1 local directive="notype" if [ $((sel & DATA_BIT)) != 0 ]; then echo "Testing Globus Services for Data-Node... (GridFTP server(s)) : [$@]" directive="datanode" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && test_globus_services "${directive}" $@) ret=$? popd >& /dev/null fi ret=1 if [ $((sel & IDP_BIT)) != 0 ]; then echo "Testing Globus Services for Index-Node... (MyProxy server)" directive="gateway" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && test_globus_services "${directive}" $@) ret=$? popd >& /dev/null fi return 0 } # Starts the globus services by delegating out to esg-globus script # arg1 selection bit vector ($sel) # args* (in the context of "data" node -> ["bdm"|"end-user"]) start_globus() { local sel=${1:-${sel:-0}} ((sel == 0)) && echo "start_globus: no selection set, returning (1)" && return 1 shift local data_node_ret=1 local directive="notype" if [ $((sel & DATA_BIT)) != 0 ]; then echo "Starting Globus Services for Data-Node... 
(GridFTP server(s)) : [$@]" directive="datanode" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && start_globus_services "${directive}" $@) data_node_ret=$? [ ${data_node_ret} != 0 ] && echo "Could Not Start Globus ESGF Node related services (GridFTP)" popd >& /dev/null fi local idp_node_ret=1 if [ $((sel & IDP_BIT)) != 0 ]; then echo "Starting Globus Services for IDP-Node... (MyProxy server)" directive="gateway" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && start_globus_services "${directive}" $@) idp_node_ret=$? [ ${idp_node_ret} != 0 ] && echo "Could Not Start Globus IDP Node related services (MyProxy)" popd >& /dev/null fi } # Stops the globus services by delegating out to esg-globus script # arg1 selection bit vector ($sel) stop_globus() { local sel=${1:-0} ((sel ==0)) && echo "stop_globus: no selection set, returning (1)" && return 1 shift local ret=1 local directive="notype" if [ $((sel & DATA_BIT)) != 0 ]; then echo -n "Stopping Globus Services for Data-Node... (GridFTP) " directive="datanode" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && stop_globus_services "${directive}" $@) ret=$? popd >& /dev/null fi if [ $((sel & IDP_BIT)) != 0 ]; then echo -n "Stopping Globus Services for Index-Node... (MyProxy server) " directive="gateway" mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null (source ${scripts_dir}/esg-globus && stop_globus_services "${directive}" $@) ret=$? popd >& /dev/null fi } ##### # Test Publication ##### test_publication() { echo echo "----------------------------" echo "Publication test... $@" echo "----------------------------" echo [ -z "${myproxy_user}" ] && read -e -p "Enter your myproxy username: " myproxy_user personal_credential_repo=${personal_credential_repo:-"$HOME/.globus"} mkdir -p ${personal_credential_repo} chown -R ${installer_uid}:${installer_gid} ${personal_credential_repo} rm -rf ${personal_credential_repo}/esgf_credentials >& /dev/null local _X509_CERT_DIR=${personal_credential_repo}/esgf_credentials local _X509_USER_KEY=${personal_credential_repo}/esgf_credentials local _X509_USER_CERT=${personal_credential_repo}/esgf_credentials echo "ESGINI = [${ESGINI}]" echo "----------" echo "(env set locally for this test publication call only! [call to myproxy])" echo "X509_CERT_DIR = [${_X509_CERT_DIR}]" echo "X509_USER_KEY = [${_X509_USER_KEY}]" echo "X509_USER_CERT = [${_X509_USER_CERT}]" echo "----------" echo "myproxy-logon -s $myproxy_endpoint -l $myproxy_user -p $myproxy_port -o ${personal_credential_repo}/certificate-file -T" X509_CERT_DIR=${_X509_CERT_DIR} \ X509_USER_KEY=${_X509_USER_KEY} \ X509_USER_CERT=${_X509_USER_CERT} \ myproxy-logon -s $myproxy_endpoint -l $myproxy_user -p $myproxy_port -o ${personal_credential_repo}/certificate-file -T [ $? != 0 ] && echo " ERROR: MyProxy not setup properly. Unable to execute command." 
&& return 1 chown -R ${installer_uid}:${installer_gid} ${personal_credential_repo} local index_peer=$(sed -n 's@^[^#]*[ ]*hessian_service_url[ ]*=[ ]*\(.*publishingService\)$@\1@p' ${esg_root_dir}/config/esgcet/esg.ini | sed -n 's@http[s]*://\([^/]*\)/.*@\1@p') echo "(target index node => ${index_peer} $(sed -n 's@^[^#]*[ ]*hessian_service_url[ ]*=[ ]*\(.*\)$@\1@p' ${esg_root_dir}/config/esgcet/esg.ini | grep -q esg-search && echo "(P2P)") )" #Publish the dataset from the THREDDS catalog created above... echo "$cdat_home/bin/esgpublish --service fileservice --use-existing pcmdi.${esg_root_id}.${node_short_name}.test.mytest --noscan --publish" $cdat_home/bin/esgpublish --service fileservice --use-existing pcmdi.${esg_root_id}.${node_short_name}.test.mytest --noscan --publish [ $? != 0 ] && echo " ERROR: unable to successfully execute esgpublish" && return 1 sleep 3 [ "$1" = "no-unpublish" ] && echo "Leaving test publication file published" && return 1 echo "$cdat_home/bin/esgunpublish --skip-thredds pcmdi.${esg_root_id}.${node_short_name}.test.mytest" $cdat_home/bin/esgunpublish --skip-thredds pcmdi.${esg_root_id}.${node_short_name}.test.mytest [ $? != 0 ] && echo " ERROR: unable to successfully execute esgunpublish" && return 1 return 0 } #NOTE: I am in the midst of refactoring some of the update/download code... it may very well be the case that I # call update_script inside of setup_subsystem... it remains to be seen how much of the semantics of update I want # to enforce and how that informs what can be done in setup. -gavin #arg (1) - name of installation script root name. Ex:security which resolves to script file esg-security #arg (2) - directory on the distribution site where script is fetched from Ex: orp #usage: update_script security orp - looks for the script esg-security in the distriubtion directory "orp" update_script() { local subsystem=$1 [ -z "${subsystem}" ] && echo "update_script [${subsystem}] requires argument!!" && checked_done 1 local server_dir=${2:-"esgf-${subsystem}"} local subsystem_install_script=${scripts_dir}/esg-${subsystem} [ ! -e "${subsystem_install_script}" ] && echo "Sorry, could not find script esg-${subsystem} to update" && return 1 pushd ${scripts_dir} >& /dev/null [ $? != 0 ] && echo "ERROR: Not able to enter scripts directory" && return 2 local fetch_file=esg-${subsystem} verbose_print "checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install))" checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install)) local ret=$? (( $ret > 1 )) && popd && return 1 chmod 755 ${fetch_file} popd >& /dev/null return 0 } #NOTE: Here we are enforcing a bit of a convention... The name of #subsystem files must be in the form of esg-xxx-xxx where the script #contains its "main" function named setup_xxx_xxx(). The string passed #to this function is "xxx-xxx" # #arg (1) - name of installation script root name. Ex:security which resolves to script file esg-security #arg (2) - directory on the distribution site where script is fetched from Ex: orp #usage: setup_subsystem security orp - looks for the script esg-security in the distriubtion dir orp setup_subsystem() { local subsystem=$1 [ -z "${subsystem}" ] && echo "setup_subsystem [${subsystem}] requires argument!!" 
&& checked_done 1 local server_dir=${2:?"Must provide the name of the distribution directory where subsystem script lives - perhaps ${subsystem}?"} local subsystem_install_script=${scripts_dir}/esg-${subsystem} #--- #check that you have at one point in time fetched the subsystem's installation script #if indeed you have we will assume you would like to proceed with setting up the latest... #Otherwise we just ask you first before you pull down new code to your machine... #--- #local default="Y" #((force_install)) && default="Y" #local dosetup #if [ ! -e ${subsystem_install_script} ] || ((force_install)) ; then # echo # read -e -p "Would you like to set up ${subsystem} services? $([ "$default" = "N" ] && echo "[y/N]" || echo "[Y/n]") " dosetup # [ -z "${dosetup}" ] && dosetup=${default} # if [ "${dosetup}" = "N" ] || [ "${dosetup}" = "n" ] || [ "${dosetup}" = "no" ]; then # return 0 # fi #fi echo echo "-------------------------------" echo "LOADING installer for ${subsystem}... " mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null pushd ${scripts_dir} >& /dev/null local fetch_file=esg-${subsystem} verbose_print "checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install))" checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install)) local ret=$? (( $ret > 1 )) && popd && return 1 chmod 755 ${fetch_file} popd >& /dev/null #source subsystem file and go! shift && debug_print "-->>> " [ -n "${server_dir}" ] && shift && debug_print "-->>> " debug_print "source ${scripts_dir}/${fetch_file} && setup_${subsystem//'-'/_} ${upgrade_mode} $@" (source ${scripts_dir}/${fetch_file} && verbose_print ":-) " && setup_${subsystem//'-'/_} ${upgrade_mode} $@ ) checked_done $? echo "-------------------------------" echo echo } # New version of setup_subsystem which executes the subscript without sourcing it setup_subsystem_new() { local subsystem=$1 [ -z "${subsystem}" ] && echo "setup_subsystem [${subsystem}] requires argument!!" && checked_done 1 local server_dir=${2:?"Must provide the name of the distribution directory where subsystem script lives - perhaps ${subsystem}?"} echo echo "-------------------------------" echo "LOADING installer for ${subsystem}... " mkdir -p ${workdir} [ $? != 0 ] && checked_done 1 pushd ${workdir} >& /dev/null pushd ${scripts_dir} >& /dev/null local fetch_file=esg-${subsystem} verbose_print "checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install))" checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file} $((force_install)) local ret=$? (( $ret > 1 )) && popd && return 1 chmod 755 ${fetch_file} popd >& /dev/null shift && debug_print "-->>> " [ -n "${server_dir}" ] && shift && debug_print "-->>> " # Check version vercomp `${scripts_dir}/${fetch_file} --newversion` `${scripts_dir}/${fetch_file} --oldversion` [ $? == 0 ] || [ $? == 2 ] && (( ! force_install )) && [OK] && return 0 # Go! ${scripts_dir}/${fetch_file} checked_done $? 
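    #NOTE (illustrative): like setup_subsystem above, this variant relies on the esg-<subsystem>
    #naming convention described earlier; e.g.
    #  setup_subsystem security orp
    #fetches ${esg_dist_url}/orp/esg-security into ${scripts_dir} and (in the sourcing variant)
    #invokes its setup_security() entry point.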
echo "-------------------------------" echo echo } ##### # Show user summary and environment variables that have been set ##### show_summary() { if [ $((show_summary_latch == 0)) = 1 ]; then return 0; fi echo echo "-------------------" echo " esgf node run summary: " echo "-------------------" echo "The following environment variables were used during last full install" echo "They are written to the file ${envfile}" echo "Please source this file when using these tools" echo cat ${envfile} echo "-------------------" echo "Installation Log:" echo cat ${install_manifest} echo "-------------------" echo return 0 } write_env() { echo "Generating default ${envfile} file" echo "modified" [ -e ${envfile} ] && cp ${envfile} ${envfile}.bak cat /dev/null > ${envfile} write_paths write_git_env write_java_env write_ant_env write_postgress_env write_esgcet_env write_tomcat_env write_tds_env write_globus_env echo 'source /usr/local/conda/bin/activate esgf-pub' >> ${envfile} dedup ${envfile} echo "-------------------" cat ${envfile} echo "-------------------" return 0 } ############################################ # Certificate Management Utility Functions ############################################ _relocate_n_relink_globus_certicates_conf() { (( (sel & IDP_BIT) != 0 )) && echo "Peforming additional checks (IDP node presumes simpleCA installation and configuration)" || return 0 local grid_security_dir=${globus_global_certs_dir%/*} pushd ${grid_secuirty_dir} >& /dev/null local globus_ca_config_files=(globus-host-ssl.conf globus-user-ssl.conf grid-security.conf) local link target local bad_link_count local file echo "Checking for orphaned symlinks... (${#globus_ca_config_files[@]} files) " for file in ${globus_ca_config_files[@]}; do echo -n ${file} if [[ -e ${file} ]] && [[ -h ${file} ]] && [[ -f $(readlink -f ${file}) ]] ; then debug_print "$file..." else ((bad_link_count++)) fi echo done echo ((bad_link_count > 0)) && (source ${scripts_dir}/esg-globus && simpleCA_relink $@); unset file for file in ${globus_ca_config_files[@]}; do if [ -h "${file}" ]; then #------ #Will not throw away a perfectly good regex (hard to find out how to escape ' !) #read link target <<< $(stat ${file} | sed -n 's/[ ]*File:[ ]*`\(.*\)[\"'\''].*->.*`\(.*\)[\"'\''].*/\1 \2/p') #------ link=${file} target=$(readlink ${file}) ( [ -e "./${target##*/}" ] || [ ! -e "${target}" ] ) && continue echo -n "Relinking... ${link} -->> ${target##*/} " unlink ${link} debug_print "\nlink = $link" debug_print "target = $target" debug_print "\cp ${target} ${grid_security_dir}/${target##*/}" \cp ${target} ${grid_security_dir}/${target##*/} >& /dev/null debug_print "ln -sf ${target##*/} ${link} " ln -sf ${target##*/} ${link} [ $? == 0 ] && [OK] || [FAIL] fi done popd >& /dev/null } #Goes to ESG distribution server and pulls down all certificates for the federation. #(suitable for crontabbing) fetch_esgf_certificates() { echo echo "Fetching Freshest ESG Federation Certificates..." #_relocate_n_relink_globus_certicates_conf globus_global_certs_dir=/etc/grid-security/certificates [ -d ${globus_global_certs_dir} ] && tar czf ${globus_global_certs_dir%/*}/${globus_global_certs_dir##*/}.bak.tgz ${globus_global_certs_dir} >& /dev/null && rm ${globus_global_certs_dir}/* mkdir -p ${globus_global_certs_dir} [ $? 
!= 0 ] && echo "Could not create directory: [${globus_global_certs_dir}] :-(" && return 1 local esg_trusted_certs_file=esg_trusted_certificates.tar if [ "$node_peer_group" = "esgf-test" ]; then debug_print "curl -s -L --insecure ${esg_dist_url_root}/certs/test-federation/${esg_trusted_certs_file} | (cd ${globus_global_certs_dir}; pax -r -s ',.*/,,p')" curl -s -L --insecure ${esg_dist_url_root}/certs/test-federation/${esg_trusted_certs_file} | (cd ${globus_global_certs_dir}; pax -r -s ',.*/,,p') else debug_print "curl -s -L --insecure ${esg_dist_url_root}/certs/${esg_trusted_certs_file} | (cd ${globus_global_certs_dir}; pax -r -s ',.*/,,p')" curl -s -L --insecure ${esg_dist_url_root}/certs/${esg_trusted_certs_file} | (cd ${globus_global_certs_dir}; pax -r -s ',.*/,,p') fi local ret=$? rmdir ${globus_global_certs_dir}/$(echo ${esg_trusted_certs_file} | awk 'gsub(/('$compress_extensions')/,"")') if [ $ret == 0 ]; then [ -e ${globus_global_certs_dir%/*}/${globus_global_certs_dir##*/}.bak.tgz ] && rm ${globus_global_certs_dir%/*}/${globus_global_certs_dir##*/}.bak.tgz fi local simpleCA_cert=$(readlink -f $(grep certificate_issuer_cert "${esg_root_dir}/config/myproxy/myproxy-server.config" 2> /dev/null | awk '{print $2}' | tr -d '\"') 2> /dev/null) if [ -n "${simpleCA_cert}" ]; then local simpleCA_cert_hash=$(openssl x509 -noout -in ${simpleCA_cert} -hash) echo "checking for MY cert: ${globus_global_certs_dir}/${simpleCA_cert_hash}.0" [ -e "${globus_global_certs_dir}/${simpleCA_cert_hash}.0" ] && ((!force_install)) && echo "Local CA cert file detected.... $([OK])" && return 0 echo "Integrating in local simpleCA_cert... " debug_print "Local SimpleCA Root Cert: ${simpleCA_cert}" debug_print "Extracting Signing policy command: tar xvzfO ${simpleCA_cert%/*}/globus_simple_ca_${simpleCA_cert_hash}_setup*.tar.gz globus_simple_ca_${simpleCA_cert_hash}_setup-*/${simpleCA_cert_hash}.signing_policy > ${globus_global_certs_dir}/${simpleCA_cert_hash}.signing_policy" (cp -v ${simpleCA_cert} ${globus_global_certs_dir}/${simpleCA_cert_hash}.0 && \ tar xvzfO ${simpleCA_cert%/*}/globus_simple_ca_${simpleCA_cert_hash}_setup*.tar.gz globus_simple_ca_${simpleCA_cert_hash}_setup-*/${simpleCA_cert_hash}.signing_policy > ${globus_global_certs_dir}/${simpleCA_cert_hash}.signing_policy && \ [ -d ${tomcat_install_dir}/webapps/ROOT ] && openssl x509 -text -hash -in ${simpleCA_cert} > ${tomcat_install_dir}/webapps/ROOT/cacert.pem && \ echo " My CA Cert now posted @ http://$(hostname --fqdn)/cacert.pem " chmod 644 ${tomcat_install_dir}/webapps/ROOT/cacert.pem && \ [OK]) || [FAIL] #zoiks #write_as_property node_dn $(extract_openssl_dn ${simpleCA_cert}) && echo "property updated $([OK])" fi chmod 755 ${globus_global_certs_dir} chmod 644 ${globus_global_certs_dir}/* } fetch_esgf_truststore() { echo "Fetching ESGF Federation Truststore... " local truststore_file_=${1:-${truststore_file}} [ -z "${truststore_file_}" ] && echo "Sorry, cannot fetch truststore [${truststore_file_}], value not set" && return 1 [ -e "${truststore_file_}" ] && mv -v ${truststore_file_}{,.bak} if [ "$node_peer_group" = "esgf-test" ]; then checked_get ${truststore_file} ${esg_dist_url_root}/certs/test-federation/${truststore_file_##*/} else checked_get ${truststore_file} ${esg_dist_url_root}/certs/${truststore_file_##*/} fi (( $? 
> 1 )) && [FAIL] && mv -v ${truststore_file_}{.bak,} || [OK] apache_truststore='/etc/certs/esgf-ca-bundle.crt' [ -e "${apache_truststore}" ] && mv -v ${apache_truststore}{,.bak} if [ "$node_peer_group" = "esgf-test" ]; then checked_get ${apache_truststore} ${esg_dist_url_root}/certs/test-federation/${apache_truststore##*/} else checked_get ${apache_truststore} ${esg_dist_url_root}/certs/${apache_truststore##*/} fi (( $? > 1 )) && [FAIL] && mv -v ${apache_truststore}{.bak,} || [OK] && cat /etc/tempcerts/cacert.pem >>/etc/certs/esgf-ca-bundle.crt; # For the IDP it is good to trust yourself ;-)... if [ -e "${esg_root_dir}/config/myproxy/myproxy-server.config" ] ; then local simpleCA_cert=$(readlink -f $(grep certificate_issuer_cert "${esg_root_dir}/config/myproxy/myproxy-server.config" 2> /dev/null | awk '{print $2}' | tr -d '\"') 2> /dev/null) local simpleCA_cert_hash=$(openssl x509 -noout -in ${simpleCA_cert} -hash) _insert_cert_into_truststore ${globus_global_certs_dir}/${simpleCA_cert_hash}.0 ${truststore_file_} fi # From an SSL p.o.v. you should trust yourself as well... add_my_cert_to_truststore #sync_with_java_truststore ${truststore_file_} } #Converts ESG certificates (that can be fetch by above function) into a truststore #(adapted from original rendition by Philip Kershaw) rebuild_truststore() { debug_print "rebuild_truststore() $@" local truststore_file_=${1:-${truststore_file}} [ -z "${truststore_file_}" ] && echo "Sorry, cannot rebuild truststore [${truststore_file_}], value not set" echo echo "(Re)building truststore from esg certificates... [${truststore_file_}]" if [ ! -d ${globus_global_certs_dir} ] || (( force_install )) ; then [ ! -d ${globus_global_certs_dir} ] && echo "Sorry, No esg certificates found... in ${globus_global_certs_dir}" || echo "(forcing fresh rebuild)" echo "So fetching fresh esg certificates :-)" fetch_esgf_certificates fi #If you don't already have a truststore to build on.... #Start building from a solid foundation i.e. Java's set of ca certs... [ ! -e ${truststore_file_} ] && cp -v ${java_install_dir}/jre/lib/security/cacerts ${truststore_file_} local tmp_dir=/tmp/esg_scratch mkdir -p ${tmp_dir} local cert_files=$(find ${globus_global_certs_dir} | egrep '^.*\.0$') for cert_file in $cert_files; do _insert_cert_into_truststore ${cert_file} ${truststore_file_} done rmdir ${tmp_dir} #make sure that MY cert is in the trustore (it should be). #As a side effect there is sync'ing the truststore with what is in the JVM (( force_install )) && add_my_cert_to_truststore sync_with_java_truststore ${truststore_file_} chown ${tomcat_user}:${tomcat_group} ${truststore_file_} echo "...done" return 0 } #Takes full path to a pem certificate file and incorporates it into the given truststore # _insert_cert_into_truststore() { local cert_file=$1 [ -z "${cert_file}" ] && echo "No certificate (pem) file specified" && exit 1 local truststore_file_=${2:-${truststore_file}} echo -n "$cert_file -> " local cert_hash=$(echo ${cert_file##*/} | awk -F'.' '{print $1}') ; local der_file="${tmp_dir}/${cert_hash}.der" ; #-------------- # Convert from PEM format to DER format - for ingest into keystore openssl x509 -inform pem -in ${cert_file} -outform der -out ${der_file} #-------------- if [ -f ${truststore_file_} ]; then $java_install_dir/bin/keytool -delete -alias ${cert_hash} -keystore ${truststore_file_} \ -storepass ${truststore_password} 2>&1 > /dev/null [ $? == 0 ] && echo -n "- " || echo -n " " fi [ $? 
== 0 ] && echo -n "+ " || echo -n " " $java_install_dir/bin/keytool -import -alias ${cert_hash} -file ${der_file} -keystore ${truststore_file_} \ -storepass ${truststore_password} -noprompt [ $? != 0 ] && [FAIL] rm -f ${der_file} return 0 } #This takes our certificate from the keystore and adds it to the #truststore. This is done for other services that use originating #from this server talking to another service on this same host. This #is the interaction scenario with part of the ORP security mechanism. #The param here is the password of the *keystore* add_my_cert_to_truststore() { echo debug_print "add_my_cert_to_truststore() [$@]" _glean_keystore_info local keystore_file_=${keystore_file} local keystore_password_=${keystore_password} local keystore_alias_=${keystore_alias} local truststore_file_=${truststore_file} local truststore_password_=${truststore_password} local do_checks=1 while [ -n "${1}" ]; do case $1 in --keystore | -ks) shift keystore_file_=$1 debug_print "keystore_file = [${keystore_file_}]" ;; --keystore-pass | -kpass) shift keystore_password_=$1 debug_print "keystore_password = [${keystore_password_}]" ;; --alias | -a) shift keystore_alias_=$1 debug_print "alias = [${alias_}]" ;; --truststore | -ts) shift truststore_file_=$1 debug_print "truststore_file = [${truststore_file_}]" ;; --truststore-pass | -tpass) shift truststore_password_=$1 debug_print "truststore_password = [${truststore_password_}]" ;; --no-check) do_checks=0; ;; *) printf "\n ERROR: unknown switch \"$1\" \n\n" && return 3 ;; esac shift done debug_print "keystore_file_=${keystore_file_}" debug_print "keystore_password_=${keystore_password_}" debug_print "keystore_alias_=${keystore_alias_}" debug_print "truststore_file_=${truststore_file_}" debug_print "truststore_password_=${truststore_password_}" debug_print "do_checks=${do_checks}" local store_password if [ "${keystore_password:=$(cat ${ks_secret_file} 2> /dev/null)}" != "${keystore_password_}" ]; then local verify_password while [ 1 ]; do echo read -e -s -p "Please enter the password for this keystore : " store_password [ "${store_password}" = "changeit" ] && break [ -z "${store_password}" ] && echo "Invalid password [${store_password}]" && continue echo read -e -s -p "Please re-enter the password for this keystore: " verify_password if [ "${store_password}" = "${verify_password}" ] ; then $java_install_dir/bin/keytool -list -keystore ${keystore_file_} -storepass ${store_password} >& /dev/null [ $? != 0 ] && echo "$([FAIL]) Could not access private keystore ${keystore_file_} with provided password. Try again..." && continue; keystore_password_=${store_password} echo break else echo "Sorry, values did not match" echo fi done unset verify_password echo else debug_print "keystore password = ******" fi unset store_password if ((do_checks == 1)); then #only making this call to test password $java_install_dir/bin/keytool -v -list -keystore ${keystore_file_} -storepass ${keystore_password_} >& /dev/null [ $? != 0 ] && echo "$([FAIL]) Could not access private keystore ${keystore_file_} (re-run --add-my-cert-to-truststore)" && return 1; [OK] debug_print "Peforming checks against configured values..." 
debug_print "[$(md5sum <(echo "${keystore_password}") | awk '{print $1}')] == [$(md5sum <(echo "${keystore_password_}") | awk '{print $1}')]" if [ "${keystore_password}" != "${keystore_password_}" ]; then printf "\nWARNING: password entered does not match what's in the app server's configuration file\n" [[ ${keystore_password_} ]] && eval "perl -p -i -e 's#(?<=keystorePass=)\"([^\"]*)\"#\"${keystore_password_}\"#g' ${tomcat_install_dir}/conf/server.xml" && \ echo -n " Adjusted app server's config file... " ( local value=$(sed -n 's#.*keystorePass="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml 2> /dev/null) && echo -n "." || echo -n "x" [ $? == 0 ] && [ -n "${value}" ] && keystore_password=${value} [ "${keystore_password}" = "${keystore_password_}" ] && echo " $([OK]) " || echo " $([FAIL]) " unset value ) fi fi #---------------------------------------------------------------- #Re-integrate my public key (I mean, my "certificate") from my keystore into the truststore (the place housing all public keys I allow to talk to me) #---------------------------------------------------------------- if [ -e ${truststore_file_} ]; then echo "Re-Integrating keystore's certificate into truststore.... " echo -n "Extracting keystore's certificate... " $java_install_dir/bin/keytool -export -alias ${keystore_alias_} -file ${keystore_file_}.cer -keystore ${keystore_file_} -storepass ${keystore_password_} [ $? == 0 ] && [OK] || ([FAIL] && exit 1) debug_print "keytool -v -list -keystore ${truststore_file_} -storepass $(md5sum <(echo "${truststore_password_}") | awk '{print $1}') | egrep -i '^Alias[ ]+name:[ ]+'${keystore_alias}'$'" $java_install_dir/bin/keytool -v -list -keystore ${truststore_file_} -storepass ${truststore_password_} | egrep -i '^Alias[ ]+name:[ ]+'${keystore_alias_}'$' if [ $? == 0 ]; then echo "Detected Alias \"${keystore_alias}\" Present... Removing... Making space for certificate... " $java_install_dir/bin/keytool -delete -alias ${keystore_alias_} -keystore ${truststore_file_} -storepass ${truststore_password_} 2>&1 > /dev/null #for ORP [ $? != 0 ] && echo " ERROR: problem deleting ${keystore_alias} key from keystore!" && return 1 fi echo "Importing keystore's certificate into truststore... " $java_install_dir/bin/keytool -import -v -trustcacerts -alias ${keystore_alias_} -keypass ${keystore_password_} -file ${keystore_file_}.cer -keystore ${truststore_file_} -storepass ${truststore_password_} -noprompt [ $? == 0 ] && [OK] || ([FAIL] && exit 1) sync_with_java_truststore ${truststore_file_} echo -n "cleaning up after ourselves... " rm -v ${keystore_file_}.cer [ $? == 0 ] && [OK] || [FAIL] fi chown ${tomcat_user}:${tomcat_group} ${truststore_file_} #---------------------------------------------------------------- return 0 } #--- #Original command instructions can be found here: #http://www.sial.org/howto/openssl/csr/ #arg 1 -> what we want to name the public cert #arg 2 -> what we want to name the private key #arg 3 -> what we want the DN to be for public cert generate_ssl_key_and_csr() { echo "Generating private host key... " local private_key=${2:-${tomcat_conf_dir}/hostkey.pem} openssl genrsa -out ${private_key} 1024 [ $? == 0 ] && [OK] || ([FAIL] && return 1) chmod 400 ${private_key} #NOTE: To include DN at the command line #openssl req -new -nodes -subj '/O=ESGF/OU=ESGF.ORG/CN=esg-test1.llnl.gov' -key hostkey.pem -out esg-test1.llnl.gov-esg-node.csr echo "Generating Certificate Signing Request (csr)... 
" local public_cert_req=${1:-${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.csr} local public_cert_dn=${3:-$(extract_keystore_dn ${keystore_file})} echo "Using DN = [${public_cert_dn}]" #At this point public_cert_dn is empty if extract_keystore_dn was unable to perform successfully #So then we run the regular request generation that will ask you a series of questions to build the DN if [ -z "${public_cert_dn}" ]; then echo "openssl req -new -nodes -key ${private_key} -out ${public_cert_req}" openssl req -new -nodes -key ${private_key} -out ${public_cert_req} [ $? == 0 ] && [OK] || ([FAIL] && return 2) else #Or, there was indeed an extraction of the DN from the keytool or I manually provided the DN #So then generate the request with the DN... echo "openssl req -new -nodes -subj ${public_cert_dn} -key ${private_key} -out ${public_cert_req}" openssl req -new -nodes -subj "${public_cert_dn}" -key ${private_key} -out ${public_cert_req} [ $? == 0 ] && echo "$([OK])*" || (echo "$([FAIL])*" && return 2) fi chmod 644 ${public_cert_req} >& /dev/null if [ -z "$(openssl req -text -noout -in ${public_cert_req} | sed -n '/[ ]*Subject:.*[ ]O=.*[ ]OU=.*[ ]CN=.*/p')" ]; then echo echo "------------------------------------------------------------------" echo "The certificate reqeust generated does NOT have a suitable subject line. :-(" echo "Please re-run and be sure that the DN has been set appropriately (see below)" echo "The DN may be specified explicitly via the following command:" echo echo "> ${0##*/} --generate-ssl-key-and-csr ${public_cert_req} ${tomcat_conf_dir}/hostkey.pem /O=ESGF/OU=ESGF.ORG/CN=${esgf_host}" echo echo "Please remove the faulty csr (${public_cert_req}) now, and retry" echo "------------------------------------------------------------------" echo return 1 fi echo "Generating 30 day temporary self-signed certificate... " openssl x509 -req -days 30 -extensions v3_ca -in ${public_cert_req} -signkey ${private_key} -out ${public_cert_req%.*}.pem [ $? == 0 ] && [OK] || ([FAIL] && return 3) openssl x509 -noout -text -in ${public_cert_req%.*}.pem echo echo "--------------------------------------------------------" echo "In Directory: $(pwd)" echo "Generated private key: $(ls ${private_key})" echo "Generated certificate: $(ls ${public_cert_req})" echo "Please obtain and install appropriate certificates at the earliest. Execute esg-node --cert-howto for details."; #echo "Then run %> esg-node --install-ssl-keypair (use --help for details)" echo "--------------------------------------------------------" echo } check_certificates() { echo "check_certificates..." source ${esg_functions_file} check_cert_expiry_for_files ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem (source ${scripts_dir}/esg-globus && globus_check_certificates $@) } ########################### # A Note on DNs (order is important) # What we assert in this script as standard is: # standard ------> /O=Grid/OU=GlobusTest/OU=simpleCA-pcmdi3.llnl.gov/CN=pcmdi7.llnl.gov [outer-to-inner] # java-style -----> CN=pcmdi7.llnl.gov, OU=simpleCA-pcmdi3.llnl.gov, OU=GlobusTest, O=Grid [inner-to-outer] # (openssl x509) -> O=Grid, OU=GlobusTest, OU=simpleCA-pcmdi3.llnl.gov, CN=pcmdi7.llnl.gov [outer-to-inner] # # openssl will take a -subj dn string in standard format # what has been chosen as our standard is the most unix friendly # (spaces suck as delims and it is often good to be able to tack on the most specific attribute [...CN=?] 
###########################
# A Note on DNs (order is important)
# What we assert in this script as standard is:
# standard ------> /O=Grid/OU=GlobusTest/OU=simpleCA-pcmdi3.llnl.gov/CN=pcmdi7.llnl.gov [outer-to-inner]
# java-style -----> CN=pcmdi7.llnl.gov, OU=simpleCA-pcmdi3.llnl.gov, OU=GlobusTest, O=Grid [inner-to-outer]
# (openssl x509) -> O=Grid, OU=GlobusTest, OU=simpleCA-pcmdi3.llnl.gov, CN=pcmdi7.llnl.gov [outer-to-inner]
#
# openssl will take a -subj dn string in standard format
# what has been chosen as our standard is the most unix friendly
# (spaces suck as delims and it is often good to be able to tack on the most specific attribute [...CN=?]
# at the end and great for regex's)
# -gavin
###########################

#------
#HELPER / UTILITY FUNCTION (aka private)
#------
#arg 1 -> the location of the java keystore file
#arg 2 -> the password to the java keystore file
#The output of this function is the DN pulled from the keystore and transformed to "standard" format
#Ex: /O=Grid/OU=GlobusTest/OU=simpleCA-pcmdi3.llnl.gov/CN=pcmdi7.llnl.gov
extract_keystore_dn() {
    #TODO: check to see if there is a java keystore present... If so
    #read the store and pull out the "Owner:" DN and use it as the DN
    #for this new cert being generated. (perhaps ask the user if they
    #want to use the 'discovered' DN)
    local dn=""
    local keystore_file=${1:-${keystore_file}}
    if [ -e ${keystore_file} ]; then
        local storepass=${2}
        local storepass_switch=""
        #If no password is provided then we want to at least make sure the command line is clean such that keytool will ask you for a password
        #this is a bit of defensive coding so we don't run into the problem of not having the password and running a broken arg sequence
        [ -n "${storepass}" ] && storepass_switch="-storepass ${storepass}"
        #Note: I apologize for this horrible hacking of regex (this should be more cleanly accomplished with sed or perl using a capture group or a simple look behind)
        dn=$($java_install_dir/bin/keytool -list -v -keystore ${keystore_file} ${storepass_switch} | egrep -m 1 '^Owner:[ ]*?*' | tr -s " " | sed -e 's@^Owner: @@' | sed -e s@", "@" "@g | tac -s " " | xargs | sed -e 's@ @/@g')
    fi
    echo "/${dn}"
}

append_esgf_log4j_entries() {
    local log4j_file=${1:-"log4j.xml"}
    shift
    #Is there a sexier way to assign a default array?
    local logger_names=($@)
    if (( ${#logger_names[@]} == 0 )); then
        logger_names=("esg" "esgf")
    fi
    [ ! -e "${log4j_file}" ] && echo "WARNING: Could not find ${log4j_file} (no entries can be written)" && return 1
    echo "Inspecting ${log4j_file}..."
    for logger_name in ${logger_names[@]} ; do
        if [ -z "$(sed -n '/.*//p' ${log4j_file} cat >> ${log4j_file} < EOF
        else
            echo "Detected existing log4j.xml entry for ${logger_name} (not writing...)"
        fi
    done
    return 0
}

#Regex's the output from openssl's x509 output in "openssl" format:
#Subject: O=Grid, OU=GlobusTest, OU=simpleCA-pcmdi3.llnl.gov, CN=pcmdi7.llnl.gov
#and transforms it to our "standard" format
#/O=Grid/OU=GlobusTest/OU=simpleCA-pcmdi3.llnl.gov/CN=pcmdi7.llnl.gov
#arg 1 -> the location of the x509 pem file
extract_openssl_dn() {
    local public_cert=${1:-"/etc/grid-security/hostcert.pem"}
    echo /$(openssl x509 -text -noout -in ${public_cert} | sed -n 's/[ ]*Subject:[ ]*\(.*\)$/\1/p' | sed -n 's#, #/#pg')
}

java2standard_dn() {
    echo "$*" | tr -s " " | awk 'BEGIN { FS = ", " } { for (i = NF; i >= 1; --i) print "/"$i}' | xargs | sed -n 's# ##gp'
}
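#Example of the transformation performed by extract_keystore_dn and java2standard_dn above
#(values illustrative only):
#   keytool "Owner:" line :  Owner: CN=host.example.org, OU=GlobusTest, O=Grid
#   returned standard DN  :  /O=Grid/OU=GlobusTest/CN=host.example.org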
#"CN=esg-test1.llnl.gov, OU=simpleCA-pcmdi3.llnl.gov, OU=GlobusTest, O=Grid" standard2java_dn() { local input=$1 ans=$(echo ${input} | awk 'BEGIN { FS = "/" } { for (i = NF; i > 1; --i) print $i", " }'| xargs) ans=${ans%,} echo "${ans}" } #(Once you have submitted the CSR and have gotten it back *signed*; now install the keypair) #arg 1 -> private key #arg 2 -> public cert (the returned signed CSR) #arg 3 -> keystore name #arg 4 -> alias #arg 5 -> password (The value you want *set* for the keystore and internal private key) install_keypair() { local public_cert=${1:-${tomcat_conf_dir}/"${esgf_host:-$(hostname --fqdn)}-esg-node.pem"} local private_key=${2:-${tomcat_conf_dir}/"hostkey.pem"} local keystore_name=${3:-${keystore_file}} local keystore_alias=${4:-${keystore_alias}} local store_password=${5} local truststore_name=${6:-${truststore_file}} echo "private key = ${private_key}" echo "public cert = ${public_cert}" echo "keystore name = ${keystore_name}" echo "keystore alias = ${keystore_alias}" local ks_secret=$(cat ${ks_secret_file} 2> /dev/null) local default_passwd=${ks_secret:=changeit} if [ ! -f ${public_cert} ] ; then echo ${public_cert} not found or not readable. exit -1 fi if [ ! -f ${private_key} ] ; then echo ${private_key} not found or not readable. exit -1 fi if [ -z "${store_password}" ]; then local verify_password while [ 1 ]; do echo read -e -s -p "Please set the password for this keystore : " store_password [ "${store_password}" = "${default_passwd}" ] && break [ -z "${store_password}" ] && echo "Invalid password [${store_password}]" && continue echo read -e -s -p "Please re-enter the password for this keystore: " verify_password if [ "${store_password}" = "${verify_password}" ] ; then echo break else echo "Sorry, values did not match" echo fi done unset verify_password echo else echo "store_password = ******" fi echo "truststore_name = ${truststore_name}" local certfiles=() local certfile_entry echo "Please enter your Certificate Athority's certificate chain file(s)" echo " [enter each cert file/url press return, press return with blank entry when done]" echo while [ 1 ]; do read -e -p "certfile> " certfile_entry [ -z ${certfile_entry} ] && break certfiles=("${certfiles[@]} ${certfile_entry}") done cp ${private_key} /etc/certs/hostkey.pem cp ${public_cert} /etc/certs/hostcert.pem echo -n >/etc/certs/tmpchain for cert in ${certfiles[@]}; do if [ ! -f $cert ] ; then echo ${cert} not found or not readable. exit -1 fi cat $cert >>/etc/certs/tmpchain echo >> /etc/certs/tmpchain done cp /etc/certs/tmpchain /etc/certs/cachain.pem chmod 400 /etc/certs/hostkey.pem chmod 644 /etc/certs/hostcert.pem chmod 644 /etc/certs/cachain.pem debug_print "make_fresh_keystore ${keystore_name} ${keystore_alias} ${store_password} ${private_key} -- ${public_cert} ${certfiles[@]}" make_fresh_keystore ${keystore_name} ${keystore_alias} ${store_password} ${private_key} -- ${public_cert} ${certfiles[@]} if [ $? 
    make_fresh_keystore ${keystore_name} ${keystore_alias} ${store_password} ${private_key} -- ${public_cert} ${certfiles[@]}
    if [ $? != 0 ]; then
        echo "ERROR: Problem with key generation and/or keystore construction"
        mv ${keystore_name}{.bak,}
        exit 5
    fi

    [ "$(readlink -f ${public_cert})" -nt "$(readlink -f ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem)" ] && mv -v ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem{,.old}
    [ "$(readlink -f ${public_cert})" != "$(readlink -f ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem)" ] && cp -v ${public_cert} ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem

    ##Transform the keypair into the DER format understood by Java's keystore
    ##And generate a new keystore from that pair.
    #[ -e "${keystore_name}" ] && mv ${keystore_name} ${keystore_name}.bak
    #convert_keys ${private_key} ${public_cert} && \
    #    create_keystore ${private_key%.*}.der ${public_cert%.*}.der ${keystore_name} ${keystore_alias} ${store_password}
    #[ $? != 0 ] && echo "ERROR: Problem with key generation and/or keystore construction" && mv ${keystore_name}.bak ${keystore_name} && exit 5

    echo "${store_password}" > ${ks_secret_file}
    chmod 640 ${ks_secret_file}
    chown ${installer_uid}:${tomcat_group} ${ks_secret_file}

    #(In order for ORP or any other local service to trust each other put your own cert into the truststore)
    [ -e "${truststore_name}" ] && mv -v ${truststore_name}{,.bak}
    rebuild_truststore ${truststore_name} && add_my_cert_to_truststore --keystore-pass ${store_password}
    [ $? != 0 ] && echo "ERROR: Problem with truststore generation" && mv ${truststore_name}.bak ${truststore_name} && exit 6

    #register ${esgf_idp_peer}
    echo "Please restart this node for keys to take effect: \"$0 restart\""
    echo
}

#The following helper function creates a new keystore for your tomcat installation
# arg 1 -> keystore name
# arg 2 -> keystore alias
# arg 3 -> keystore password
# arg 4 -> private key
# arg 5 -> public cert
# arg 6.. -> intermediate certificate(s)
make_fresh_keystore() {
    debug_print "make_fresh_keystore() [$@]"
    #-------------
    #Set default values such that env vars may be used
    #-------------
    local keystore_name
    local keystore_alias
    local store_password
    local private_key
    local provider="org.bouncycastle.jce.provider.BouncyCastleProvider"
    local install_dir=${esg_tools_dir}/idptools

    #-------------
    #Collect args...
    #-------------
    local certfiles=()
    local arg_length=$#
    for ((i=1; i <= ${arg_length} ; i++)) ; do
        [ "$1" = "--" ] && shift && certfiles=($@) && break
        ((i==1)) && keystore_name=$1 && shift
        ((i==2)) && keystore_alias=$1 && shift
        ((i==3)) && store_password=$1 && shift
        ((i==4)) && private_key=$1 && shift
    done
    local size=${#certfiles[@]}
    debug_print "[certfiles = ${certfiles[@]}]"

    [ ! -e "${private_key}" ] && echo "file [${private_key}] does not exist" && return 1
    (( size == 0 )) && echo "no certificate files listed" && usage

    #-------------
    #Display values
    #-------------
    echo
    echo "Keystore name : ${keystore_name}"
    echo "Keystore alias: ${keystore_alias}"
    echo "Store password: ${store_password}"
    echo "Private key : ${private_key}"
    echo "Certificates..."

    mkdir -p ${install_dir}
    [ $? != 0 ] && echo "exiting..." && return 1
    PATH=${PATH}:${install_dir}/bin

    local certbundle="${install_dir}/cert.bundle"
    local ca_chain_bundle="${install_dir}/ca_chain.bundle"
    local content
    local skip_check=0
    local count=1
    for ((i=0; i < ${size}; i++)); do
        if ((i == 0)); then
            echo " Signed Cert -----> ${certfiles[i]}"
            echo "${certfiles[i]}" | egrep '^http'
            if [ $?
== 0 ]; then curl -s -k ${certfiles[i]} > ${certbundle} && [OK] skip_check=1 else cat ${certfiles[i]} > ${certbundle} fi cat /dev/null > ${ca_chain_bundle} elif ((i == (size-1) )) ; then echo " Root Cert -------> ${certfiles[i]}" echo "${certfiles[i]}" | egrep '^http' if [ $? == 0 ]; then content=$(curl -s -k ${certfiles[i]}) [ $? != 0 ] && echo "Cannot connect to ${certfiles[i]}" && return 1 echo "${content}" | grep "Not Found" >& /dev/null [ $? == 0 ] && echo "${content}" && return 1 debug_print "** ${content}" echo "$content" >> ${certbundle} && \ echo "$content" >> ${ca_chain_bundle} && \ [OK] skip_check=1 else cat ${certfiles[i]} >> ${certbundle} cat ${certfiles[i]} >> ${ca_chain_bundle} fi else echo " Intermediate [$((count++))] --> ${certfiles[i]}" echo "${certfiles[i]}" | egrep '^http' if [ $? == 0 ]; then content=$(curl -s -k ${certfiles[i]}) [ $? != 0 ] && echo "Cannot connect to ${certfiles[i]}" && return 1 echo "${content}" | grep "Not Found" >& /dev/null [ $? == 0 ] && echo "${content}" && return 1 debug_print "* ${content}" echo "$content" >> ${certbundle} && \ echo "$content" >> ${ca_chain_bundle} && \ [OK] skip_check=1 else cat ${certfiles[i]} >> ${certbundle} cat ${certfiles[i]} >> ${ca_chain_bundle} fi fi ((skip_check==0)) && [ ! -e "${certfiles[i]}" ] && echo "file [${certfiles[i]}] does not exist" && return 1 if ((skip_check==0)) ; then ((DEBUG)) && head ${certfiles[i]} ((DEBUG)) && openssl x509 -text -noout -in ${certfiles[i]} fi skip_check=0 done unset count unset content #------------- # Structural integrity checks... #------------- echo echo -n "checking that key pair is congruent... " local pair_hash=($((openssl x509 -noout -modulus -in ${certfiles[0]} | openssl md5|cut -d '=' -f2 ; openssl rsa -noout -modulus -in ${private_key} | openssl md5|cut -d '=' -f2) | uniq)) (( 1 == ${#pair_hash[@]} )) && printf "$([OK]) ${pair_hash}\n\n" || (printf "$([FAIL])\n\n" && return 1) #------------- #Let's be a little interactive with users for a sanity check #------------- local answer="Y" read -e -p "Is the above information correct? [Y/n] " answer [ -n "${answer}" ] && [ "${answer}" != "Y" ] && [ "${answer}" != "y" ] && echo "exiting..." && return 1 local derkey="${install_dir}/key.der" #------------- #Make empty keystore... #------------- echo -n "creating keystore... " #create a keystore with a self-signed cert local dname=${dname:-"CN=${esgf_host:-$(hostname --fqdn)}, ${default_dname}"} [ -e "${keystore_name}" ] && mv ${keystore_name} ${keystore_name}.bak $java_install_dir/bin/keytool -genkey -keyalg RSA -alias "${keystore_alias}" \ -keystore "${keystore_name}" \ -storepass "${store_password}" \ -keypass "${store_password}" \ -validity 360 \ -dname "${dname}" \ -noprompt [ $? != 0 ] && (echo "Problem with generating initial keystore... $([FAIL])" && return 1) || [OK] echo -n "clearing keystore... " #delete the cert $java_install_dir/bin/keytool -delete -alias "${keystore_alias}" -keystore "${keystore_name}" -storepass "${store_password}" [ $? != 0 ] && (echo "Problem with preparing initial keystore... $([FAIL])" && return 1) || [OK] #------------- #Convert your private key into from PEM to DER format that java likes #------------- echo -n "converting private key... " debug_print -n "openssl pkcs8 -topk8 -nocrypt -inform PEM -in ${private_key} -outform DER -out ${derkey} " openssl pkcs8 -topk8 -nocrypt -inform PEM -in ${private_key} -outform DER -out ${derkey} [ $? != 0 ] && (echo "Problem with preparing initial keystore... 
$([FAIL])" && return 1) || [OK] #------------- #Now we gather up all the other keys in the key chain... #------------- echo -n "checking that chain is valid... " if [ -n "${ca_chain_bundle}" ]; then debug_print -n "openssl verify -CAfile ${ca_chain_bundle} ${ca_chain_bundle} " local chain_check=$(openssl verify -CAfile ${ca_chain_bundle} ${ca_chain_bundle} | grep -i error) if [ -z "${chain_check}" ]; then printf "$([OK])\n\n" else printf "$([FAIL])\n ${chain_check}\n(hint: did you include the root cert for the chain :-)\n" return 1 fi else echo "Hmmm... no chain provided [${ca_chain_bundle}], skipping this check..." fi #------------- #Generating new keystore #------------- echo -n "Constructing new keystore content... " local command="extkeytool -importkey -keystore ${keystore_name} -alias ${keystore_alias} -storepass ${store_password} -keypass ${store_password} -keyfile ${derkey} -certfile ${certbundle} -provider ${provider}" debug_print ${command} ${command} > /dev/null local ret=$? #FYI: Code 127 is "command not found" if [ ${ret} == 127 ]; then echo "Hmmm... Cannot find extkeytool... :-( Let me get it for you! :-) [one moment please...]" curl -s -L --insecure ${extkeytool_download_url} | (cd ${install_dir}; tar xvzf -) echo "NOW... let's retry building your new keystore...." ${command} local ret=$? fi [ ${ret} != 0 ] && echo "$([FAIL]) Problem with running extkeytool :-(" && return 1 [ ${ret} == 0 ] && [OK] echo echo "How do things look?" $java_install_dir/bin/keytool -v -list -keystore "${keystore_name}" -storepass ${store_password} | egrep '(Owner|Issuer|MD5|SHA1|Serial number):' if [ $? == 0 ]; then echo echo "Mmmm, freshly baked keystore!" echo "If Everything looks good... then replace your current tomcat keystore with ${keystore_name}, if necessary." echo "Don't forget to change your tomcat's server.xml entry accordingly :-)" echo "Remember: Keep your private key ${private_key} and signed cert ${certfiles[0]} in a safe place!!!" echo answer="Y" read -e -p "Is the above information correct? [Y/n] " answer [ -n "${answer}" ] && [ "${answer}" != "Y" ] && [ "${answer}" != "y" ] && echo "Eh... try again... ;-)" && return 1 else echo echo "Hmmm... something didn't quite go so right... double check things..." return 1 fi return 0 } #************ #(DEPRECATED) #************ #------ #HELPER FUNCTION (aka private) #------ #converts key pairs from PEM format to DER format #(DER format is amenable to Java's keystore mechanism) #Original command instructions can be found here: #http://www.ci.uchicago.edu/wiki/bin/view/ESGProject/JarSigningNotes #arg 1 -> private key #arg 2 -> public cert convert_keys() { echo -n "Converting private key from PEM -> DER format... " local private_key=${1:-"hostkey.pem"} [ ! -e ${private_key} ] && echo "Sorry, cannot find ${private_key}" && return 2 openssl pkcs8 -topk8 -nocrypt -in ${private_key} -inform PEM -out ${private_key%.*}.der -outform DER [ $? == 0 ] && [OK] || ([FAIL] && return 1) echo -n "Converting public cert from PEM -> DER format... " local public_cert=${2:-${esgf_host:-$(hostname --fqdn)}-esg-node.pem} [ ! -e ${public_cert} ] && echo "Sorry, cannot find ${public_cert}" && return 2 openssl x509 -in ${public_cert} -inform PEM -out ${public_cert%.*}.der -outform DER [ $? 
== 0 ] && [OK] || ([FAIL] && return 1) echo pwd ls -l ${private_key%.*}.der ${public_cert%.*}.der echo return 0 } #************ #(DEPRECATED) #************ #------ #HELPER FUNCTION (aka private) #------ #Creates a new keystore based on given keypair #Original command instructions can be found here: #http://www.agentbob.info/agentbob/79-AB.html #arg 1 -> private key #arg 2 -> public cert #arg 3 -> keystore name* #arg 4 -> alias* #arg 5 -> password* (for keystore and private key) #[*Have default values - non manditory] create_keystore() { local private_key=${1:-"hostkey.der"} local public_cert=${2:-${esgf_host:-$(hostname --fqdn)}-esg-node.der} local keystore_name=${3:-${keystore_file}} local key_alias=${4:-${keystore_alias}} local password=${5:-${keystore_password}} checked_get ./ImportKey.class ${utils_url}/ImportKey.class $((force_install)) (( $? > 1 )) && echo "Could not fetch keystore generator" && return 1 [ ! -e ${private_key} ] && echo "Sorry, cannot find ${private_key}" && return 2 [ ! -e ${public_cert} ] && echo "Sorry, cannot find ${public_cert}" && return 2 CLASSPATH=. java -Dkeystore=${keystore_name} ImportKey ${private_key} ${public_cert} ${key_alias} ${password} [ $? != 0 ] && echo "$([FAIL]) Could not execute keystore generator" && return 1 echo $java_install_dir/bin/keytool -list -v -keystore ${keystore_name} -storepass ${password} return 0 } #--- #************ #(DEPRECATED) #************ #Once the generated CSR has been submitted to a CA and signed... the returned #signed certificate needs to be imported into the *existing* keystore. (and to trust store: for ORP) #arg 1 -> signed certificate file returned to you by your CA install_signed_certificate() { local signed_cert_file=${1:-${HOME}/${esgf_host:-$(hostname --fqdn)}-esg-node.pem} echo echo "Installing Signed Host Certificate: ${signed_cert_file} " [ ! -e ${signed_cert_file} ] && echo "ERROR: Could not find signed cert file: ${signed_cert_file}" && return 1 local store_password=${keystore_password} local verify_password while [ 1 ]; do echo read -e -s -p "Please enter the password for this keystore : " store_password [ "${store_password}" = "changeit" ] && break [ -z "${store_password}" ] && echo "Invalid password [${store_password}]" && continue echo read -e -s -p "Please re-enter the password for this keystore: " verify_password if [ "${store_password}" = "${verify_password}" ] ; then echo break else echo "Sorry, values did not match" echo fi done unset verify_password echo if [ -z "$(echo ${signed_cert_file##*.} | egrep "($certificate_extensions)")" ]; then #-------------- #convert from PEM format to a DER format - for ingeest into the keystores :-) echo "converting certificate from PEM format to DER format..." echo "openssl x509 -in ${signed_cert_file} -inform PEM -out ${signed_cert_file%.*}.der -outform DER" openssl x509 -inform PEM -in ${signed_cert_file} -outform DER -out ${signed_cert_file%.*}.der [ $? == 0 ] && signed_cert_file=${signed_cert_file%.*}.der #-------------- else echo "Apparently (based on file extension) this file is already in DER format" fi [ -e ${keystore_file} ] && echo "(making backup copy of keystore)" && cp -v ${keystore_file} ${keystore_file}.bak #-------------- $java_install_dir/bin/keytool -v -list -keystore ${tomcat_conf_dir}/keystore-tomcat -storepass changeit | egrep -i '^Alias[ ]+name:[ ]+root$' if [ $? == 0 ]; then $java_install_dir/bin/keytool -delete -alias root -keystore ${keystore_file} -storepass ${store_password} [ $? 
!= 0 ] && echo " ERROR: problem deleting root key from keystore!" && return 1 fi echo "keytool -import -trustcacerts -alias root -file ${signed_cert_file} -keystore ${keystore_file} -storepass *****" $java_install_dir/bin/keytool -import -trustcacerts -alias root -file ${signed_cert_file} -keystore ${keystore_file} -storepass ${store_password} local ret=$? if [ $ret == 0 ]; then [OK] else [FAIL] echo "(Restoring original keystore)" cp -v ${keystore_file}.bak ${keystore_file} return $ret fi chown ${tomcat_user}:${tomcat_group} ${keystore_file} #-------------- add_my_cert_to_truststore --keystore-pass ${store_password} unset store_password } #************ #(DEPRECATED) #************ #This should only be run AFTER your signed certificate has been already installed into the keystore! export_keystore_as_globus_hostkeys() { mkdir -p ${workdir} pushd ${workdir} >& /dev/null checked_get ./ExportPriv.class ${utils_url}/ExportPriv.class $((force_install)) (( $? > 1 )) && echo " ERROR: Could not download utility class(1) for exporting certificates" && popd && return 1 checked_get ./Base64Coder.class ${utils_url}/Base64Coder.class $((force_install)) (( $? > 1 )) && echo " ERROR: Could not download utility class(2) for exporting certificates" && popd && return 1 popd >& /dev/null local CP=".:${workdir}" [ ! -e ${keystore_file} ] && echo "Cannot locate keystore \"${keystore_file}\"" && return 2 local store_password=${keystore_password} local verify_password while [ 1 ]; do echo read -e -s -p "Please enter the password for this keystore : " store_password [ "${store_password}" = "changeit" ] && break [ -z "${store_password}" ] && echo "Invalid password [${store_password}]" && continue echo read -e -s -p "Please re-enter the password for this keystore: " verify_password if [ "${store_password}" = "${verify_password}" ] ; then echo break else echo "Sorry, values did not match" echo fi done unset verify_password echo #-------------- #NOTE: To extract the (private) key from the keystore we use a #Java program called ExportPriv. The result is a PEM formatted #private key file that we can directly use. #see: http://www.conshell.net/wiki/index.php/Keytool_to_OpenSSL_Conversion_tips echo -n "Extracting keystore's key... (private) " [ -e ${globus_global_certs_dir%/*}/hostkey.pem ] && cp ${globus_global_certs_dir%/*}/hostkey.pem ${globus_global_certs_dir%/*}/hostkey.pem.last [ -e ${globus_global_certs_dir%/*} ] && mkdir -p ${globus_global_certs_dir%/*} ((DEBUG)) && \ printf "\n${JAVA_HOME}/bin/java -classpath ${CP} ExportPriv ${keystore_file} ${keystore_alias} ************* > ${globus_global_certs_dir%/*}/hostkey.pem\n" ${JAVA_HOME}/bin/java -classpath ${CP} ExportPriv ${keystore_file} ${keystore_alias} ${store_password} > ${globus_global_certs_dir%/*}/hostkey.pem local ret=$? [ $ret == 0 ] && [OK] || [FAIL] chmod 600 ${globus_global_certs_dir%/*}/hostkey.pem #-------------- #-------------- #NOTE: To extract the (public) certificate we use the keytool. #Keytool extracts everything (except cert requests) in DER format #so, the exporting of this public certificate is no different - it #is in DER format, so we must convert. Hence the openssl calls #subsequent to the export. Also notice that the alias being used #is *"root"* this is a special alias that was used when importing #the signed certificate (once it was converted from PEM to DER for #import) echo -n "Extracting keystore's certificate... 
(public) " [ -e ${globus_global_certs_dir%/*}/hostcert.pem ] && cp ${globus_global_certs_dir%/*}/hostcert.pem ${globus_global_certs_dir%/*}/hostcert.pem.last ((DEBUG)) && \ printf "\nkeytool -export -alias ${keystore_alias} -file ${globus_global_certs_dir%/*}/hostcert.der -keystore ${keystore_file} -storepass *************\n" $java_install_dir/bin/keytool -export -alias root -file ${globus_global_certs_dir%/*}/hostcert.der -keystore ${keystore_file} -storepass ${store_password} ret=$? [ $ret == 0 ] && [OK] || [FAIL] echo -n "Converting DER cert ${globus_global_certs_dir%/*}/hostcert.der to PEM format " openssl x509 -in ${globus_global_certs_dir%/*}/hostcert.der -inform DER -out ${globus_global_certs_dir%/*}/hostcert.pem -outform PEM ret=$? [ $ret == 0 ] && [OK] || [FAIL] openssl x509 -noout -text -in ${globus_global_certs_dir%/*}/hostcert.pem chmod 644 ${globus_global_certs_dir%/*}/hostcert.pem #-------------- unset store_password #for safe keeping post the truststore and keystore in /etc/grid-security/saved if [ -d ${globus_global_certs_dir%/*} ]; then mkdir -p ${globus_global_certs_dir%/*}/saved chmod 700 ${globus_global_certs_dir%/*}/saved rm ${globus_global_certs_dir%/*}/saved/* >& /dev/null cp ${truststore_file} ${globus_global_certs_dir%/*}/saved/${truststore_file##*/}.$(date ${date_format}).bak cp ${keystore_file} ${globus_global_certs_dir%/*}/saved/${keystore_file##*/}.$(date ${date_format}).bak fi echo "(cleanup)" rm -v ${globus_global_certs_dir%/*}/hostcert.der } ##### # This function is for pulling in keys from hosts we wish to # communicate with over an encrypted ssl connection. This function # must be run after tomcat is set up since it references server.xml. ##### #(called by setup_idp_peer) #arg1 - hostname of the machine with the cert you want to get #(arg2 - password to truststore where cert will be inserted) #(arg3 - password to keystore - only applicable in "local" registration scenario) register() { echo "Installing Public Certificate of Target Peer Node...[$1]" mkdir -p ${workdir} # >& /dev/null pushd ${tomcat_conf_dir} >& /dev/null local input=${1} [ -z "${input}" ] && popd && echo "Could not register: No endpoint specified" && return 1 local ssl_endpoint=${input%%/*} #just need the hostname [ "${ssl_endpoint}" = "self" ] && ssl_endpoint=${esgf_host} local ssl_port=${ssl_port:-443} local my_truststore_password=${2:-$(sed -n 's#.*truststorePass="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml)} [ -z "${my_truststore_password}" ] && my_truststore_password="changeit" #Tomcat's default if [ "${ssl_endpoint}" = "${esgf_host}" ]; then #For local scenario need to pull from local keystore and put into local truststore... need keystore password in addition local my_keystore_password=${3:-$(sed -n 's#.*keystorePass="\([^ "]*\)".*$#\1#p' ${tomcat_install_dir}/conf/server.xml)} add_my_cert_to_truststore --keystore-pass ${my_keystore_password} && echo ":-)" || (echo ":-(" && return 1) else mkdir -p ${workdir} # >& /dev/null pushd ${workdir} >& /dev/null #Download the Java code used for certificate installation (into $workdir) checked_get './InstallCert.class' ${utils_url}/InstallCert.class $((force_install)) (( $? > 1 )) && echo " ERROR: Could not download utility class(1) for installing certificates" && popd && return 1 checked_get './InstallCert$SavingTrustManager.class' ${utils_url}/'InstallCert$SavingTrustManager.class' $((force_install)) (( $? 
> 1 )) && echo " ERROR: Could not download utility class(2) for installing certificates" && popd && return 1 popd # >& /dev/null local CP=".:${workdir}" #[ "${ssl_endpoint}" = "${esgf_host}" ] && start_tomcat #NOTE: The InstallCert code fetches Java's jssecacerts file (if #not there then uses the cacerts file) from java's jre and then adds the target's cert to it. #The output of the program is a new file named jssecacerts! So here we get the output and rename it. echo "${JAVA_HOME}/bin/java -classpath ${CP} InstallCert ${ssl_endpoint}:${ssl_port} ${my_truststore_password} ${truststore_file}" ${JAVA_HOME}/bin/java -classpath ${CP} InstallCert ${ssl_endpoint}:${ssl_port} ${my_truststore_password} ${truststore_file} local ret=$? fi chmod 644 ${truststore_file} chown ${tomcat_user}:${tomcat_group} ${truststore_file} sync_with_java_truststore ${truststore_file} popd >& /dev/null return $ret } #arg 1 - The truststore file to sync with Java's sync_with_java_truststore() { if [ ! -e ${JAVA_HOME}/jre/lib/security/jssecacerts ] && [ -e ${JAVA_HOME}/jre/lib/security/cacerts ]; then cp ${JAVA_HOME}/jre/lib/security/cacerts ${JAVA_HOME}/jre/lib/security/jssecacerts fi local java_truststore=${JAVA_HOME}/jre/lib/security/jssecacerts local external_truststore=${1:-$(readlink -f ${truststore_file})} echo -n "Sync'ing ${external_truststore} with ${java_truststore} ... " [ ! -e "${external_truststore}" ] && echo "[FAIL]: Cannot locate ${external_truststore}" && return 1 #try to not do more work (copying bits) if you don't have to... diff ${external_truststore} ${java_truststore} >& /dev/null && echo "$([OK])." && return 0 [ -e "${java_truststore}" ] && cp ${java_truststore}{,.bak} cp ${external_truststore} ${java_truststore} chmod 644 ${java_truststore} chown -R ${installer_uid}:${installer_gid} ${java_truststore}* [OK] } verify_thredds_credentials() { local ret=0 local thredds_esg_ini_file=${ESGINI:-${publisher_home_dir}/${publisher_config_file}} local tomcat_username local tomcat_password_hash echo "Inspecting tomcat... " read tomcat_username tomcat_password_hash < <(echo $(sed -n 's/.*username=\"\([^ ]*\)\"[ ]*password=\"\([a-zA-Z0-9\$]*\)\".*/\1 \2/p' ${tomcat_users_file})) echo -n "Inspecting publisher... " local thredds_username=$(sed -n 's@^[^#]*[ ]*thredds_username[ ]*=[ ]*\(.*\)$@\1@p' ${thredds_esg_ini_file}) && echo -n ":" || (echo -n "x" && ((ret++)) ) local thredds_password=$(sed -n 's@^[^#]*[ ]*thredds_password[ ]*=[ ]*\(.*\)$@\1@p' ${thredds_esg_ini_file}) && echo -n "-" || (echo -n "x" && ((ret++)) ) local thredds_password_hash=$($tomcat_install_dir/bin/digest.sh -a SHA ${thredds_password} | cut -d ":" -f 2) && echo ")" || (echo -n "x" && ((ret++)) ) echo -n "Checking username... " echo -n "${tomcat_username} = ${thredds_username} " if [ "${tomcat_username}" = "${thredds_username}" ]; then [OK] else [FAIL] ((ret++)) fi echo -n "Checking password... " echo -n "${tomcat_password_hash} = ${thredds_password_hash} " if [ "${tomcat_password_hash}" = "${thredds_password_hash}" ]; then [OK] else [FAIL] ((ret++)) fi ((ret == 0)) && echo "Verified" && return 0 echo echo "Sorry, your credentials are not valid..." local answer="N" read -e -p "Would you like to reset the app server's credentials accordingly? 
[y/N]: " doreset [ -n "${doreset}" ] && answer=$(echo ${doreset} | tr 'a-z' 'A-Z') local security_admin_password="" [ -e "${esgf_secret_file}" ] && security_admin_password=$(cat ${esgf_secret_file} 2> /dev/null) || (echo "error: cannot find passwd file" && return 1) [ -z "${security_admin_password}" ] && echo "node admin password not found" && return 1 if [ "${answer}" = "Y" ]; then echo -n "Editing files accordingly... " #edit publisher sed -i 's#^[ ]*\(thredds_username[ ]*=[ ]*\).*$#\1'${tomcat_username}'#' ${thredds_esg_ini_file} && echo -n ":" || (echo -n "x" && ((ret++)) ) sed -i 's#^[ ]*\(thredds_password[ ]*=[ ]*\).*$#\1'${security_admin_password}'#' ${thredds_esg_ini_file} && echo -n "-" || (echo -n "x" && ((ret++)) ) #edit tomcat-users.xml sed -i 's#'${tomcat_password_hash}'#'$($tomcat_install_dir/bin/digest.sh -a SHA ${security_admin_password} | cut -d ":" -f 2)'#' ${tomcat_users_file} && echo ")" || (echo "x" && ((ret++)) ) fi echo return $ret } #-------------------------------------- # Setting the (index peer) node to which we will publish #-------------------------------------- #This is how we make sure that the publisher is pointing to the correct publishing service. #We edit the esg.ini file with the information in the esgf.properties file, specifically the hessian_service_url value in esg.ini set_index_peer() { #---------------------- ret=0 [ -z "${esgf_index_peer}" ] && get_property esgf_index_peer [ -z "${esgf_host}" ] && get_property esgf_host $(hostname --fqdn) [ "$1" = "nohost" ] && echo "please provide host value" && return 1 local input=${1:-${esgf_index_peer}} [ "$1" = "self" ] && input=${esgf_host} [ "$1" = "localhost" ] && input=${esgf_host} local index_type=${2:-"p2p"} echo "Setting Index Peer... to => [${input}] (endpoint type = ${index_type})" #note: config_file defined in esg-node:init() local esgf_properties_file=${config_file:-${esg_config_dir}/esgf.properties} [ ! -e "${esgf_properties_file}" ] && debug_print " WARNING: Could not locate [${esgf_properties_file}]" && return 1 #---------------------- #Fetch and Insert the Certificate for Index Peer (to let in index peer's publishingService callback) register ${input} [ $? != 0 ] && echo " Error: could not import Index Peer's [${esgf_index_peer}] certificate!" && checked_done 1 #---------------------- local publishing_service_endpoint get_property publishing_service_endpoint #selecting the approprate value for service url... if [ "${index_type}" = "gateway" ]; then publishing_service_endpoint="https://${input}/remote/secure/client-cert/hessian/publishingService" else [ "${input}" = "${esgf_host}" ] && [ -z "${publishing_service_endpoint}" ] && return 1 publishing_service_endpoint="https://${input}/esg-search/remote/secure/client-cert/hessian/publishingService" fi #locate the publisher's configuration file - esg.ini local publisher_home_dir=$(sed -n 's@[ ]*publisher.home[ ]*=[ ]*\(.*\)$@\1@p' ${esgf_properties_file}) && echo -n ":" || (echo -n "x" && ((ret++)) ) local publisher_config_file=$(sed -n 's@[ ]*publisher.config[ ]*=[ ]*\(.*\)$@\1@p' ${esgf_properties_file}) && echo -n "-" || (echo -n "x" && ((ret++)) ) local target_file=${ESGINI:-${publisher_home_dir}/${publisher_config_file}} [ -z "${target_file}" ] && debug_print " WARNING: Could not discern publisher config file: [${target_file}]" && return 1 [ ! 
-e "${target_file}" ] && debug_print " WARNING: Could not locate publisher config file [${target_file}]" && return 1 (( ret > 0 )) && echo && debug_print " WARNING: Will not edit ${target_file}, could not successfully glean needed information" && return ${ret} #edit in-place the esg.ini file... sed -i 's#^[ ]*\(hessian_service_url[ ]*=[ ]*\).*$#\1'${publishing_service_endpoint}'#' ${target_file} && echo -n ")" || echo -n " 8-(" echo echo "${publishing_service_endpoint}" echo write_as_property esgf_index_peer ${input%%/*} write_as_property publishing_service_endpoint #---------------------- } ############################################ # General - Utility Functions ############################################ #responds true (returns 0) if this IS intended to be a managed database #is expecting the vars: # ---- "db_host" # ---- "esgf_host" # to be set # Define: managed - (true|0) this means NOT manipulated by this script but done by external means #(hint prepend "externally" before managed to get the meaning - yes I find it confusing but Stephen likes this term :-)) #db_managed=no means that it is a LOCAL database. (I have to change this damn verbiage... what I get for following pasco-speak ;-). _is_managed_db() { local input local default get_property db_managed if((! force_install)); then [ "${db_managed}" != "yes" ] && [ -n "${db_managed}" ] && db_managed="no" && return 1 #Has to be "no" or some other value non-yes value [ "${db_managed}" = "yes" ] && return 0 #Has to be explicitly yes fi if [ -z "${db_managed}" ] ; then unset input debug_print "esgf_host = $esgf_host" debug_print "db_host = $db_host" #Try to come up with some "sensible" default value for the user... if [ "${db_host}" = "${esgf_host}" ] || [ "${db_host}" = "localhost" ] || [ -z "${db_host}" ]; then default=${db_managed:-'no'} else default=${db_managed:-'yes'} fi read -e -p "Is the database external to this node? $([ "$default" = "no" ] && echo "[y/N]" || echo "[Y/n]"): " input [ -z "${input}" ] && db_managed=${default} && write_as_property db_managed if [ -n "${input}" ]; then if [ "${input}" = "Y" ] || [ "${input}" = "y" ] || [ "${input}" = "YES" ] || [ "${input}" = "yes" ]; then db_managed="yes" else db_managed="no" fi write_as_property db_managed fi else echo "db_managed = [${db_managed}]" fi unset input unset default if [ "${db_managed}" = "yes" ] && [ -n "${db_managed}" ]; then echo "Set to use externally \"managed\" database on host: ${db_host}" return 0 else debug_print "(hmm... setting db_host to localhost)" #Note: if not managed and on the local machine... always use "localhost" db_host="localhost" return 1 fi } set_redirect() { local web_app_tld=${1:-esgf-node-manager} local default_answer=${2:-"Y"} local index_file=${tomcat_install_dir}/webapps/ROOT/index.html [ ! -e ${index_file} ] && echo "Sorry, No \"ROOT\" application found!" && return 1 [ ! -e ${tomcat_install_dir}/webapps/${web_app_tld} ] && echo "Sorry, No target appliaction directory found for ${web_app_tld}" && return 2 local redirect_content="" [ "${redirect_content}" = "$(cat ${index_file})" ] && echo "Redirect already setup" && return 0 read -e -t 20 -p "Do you wish to have the ${web_app_tld}'s page as this node's homepage? 
$([ "$default_answer" = "N" ] && echo "[y/N]" || echo "[Y/n]"): " default_answer [ "n" = "${default_answer}" ] || [ "N" = "${default_answer}" ] && return 0 mv ${index_file} ${index_file}.last echo "${redirect_content}" > ${index_file} chmod 644 ${index_file} chown -R ${tomcat_user} ${index_file} chgrp -R ${tomcat_group} ${index_file} echo "Redirect setup to /${web_app_tld}" return 0 } check_for_my_ip() { debug_print "Checking for IP address(es)..." local my_ip_address local my_ip_addresses=($(ifconfig | grep "inet[^6]" | awk '$0 !~ /127.0.0.1/ { gsub (" *inet [^:]*:",""); print $1}')) get_property esgf_host_ip 2> /dev/null local matched=0 [ -n "${esgf_host_ip}" ] && (( ! force_install )) && echo "Using IP: ${esgf_host_ip}" && return 0 #We want to make sure that the IP address we have in our config #matches one of the IPs that are associated with this host for (( i=0; i < ${#my_ip_addresses[@]}; i++ )); do [ -n "${esgf_host_ip}" ] && [ "${esgf_host_ip}" = "${my_ip_addresses[$i]}" ] && ((matched++)) done ((matched == 0)) && echo "Configured host IP address does not match available IPs..." if [ -z "${esgf_host_ip}" ] || ((force_install)) || ((matched == 0)); then if (( ${#my_ip_addresses[@]} > 1 )); then #ask the user to choose... while [ 1 ]; do printf "Detected multiple IP addresses bound to this host...\n" printf "Please select the IP address to use for this installation\n" printf "\t-------------------------------------------\n" for (( i=0; i < ${#my_ip_addresses[@]}; i++ )); do printf "\t[$i] : ${my_ip_addresses[$i]}\n" done printf "\t-------------------------------------------\n" local default=0 read -e -p "select [] > " choice [ -z "${choice}" ] && continue my_ip_address=${my_ip_addresses[${choice}]} echo "selected address -> ${my_ip_addresses[${choice}]}" [ -n "${my_ip_address}" ] && break done else my_ip_address=${my_ip_addresses[0]} fi fi write_as_property esgf_host_ip ${my_ip_address} #Set local arrived at value to variable in global scope get_property esgf_host_ip } #Needed to reduce the number of commands when wanting to make a verbose conditional print verbose_print() { ((VERBOSE)) && echo $@; return 0; } debug_print() { ((DEBUG)) && echo -e $@ >&2; return 0; } #NOTE: This is another **RedHat/CentOS** specialty thing (sort of) #arg1 - min value of shmmax in MB (see: /etc/sysctl.conf) check_shmmax() { get_property kernel_shmmax 48 local set_value_mb=${1:-${kernel_shmmax}} #default is 40MB + headroom = 48MB let set_value_bytes=$((set_value_mb*1024*1024)) local cur_value_bytes=$(sysctl -q kernel.shmmax | tr -s "=" | cut -d= -f2) let cur_value_bytes=${cur_value_bytes## } if ((cur_value_bytes < set_value_bytes)); then echo "Current system shared mem value too low [$cur_value_bytes bytes] changing to [$set_value_bytes bytes]" sysctl -w kernel.shmmax=${set_value_bytes} sed -i.bak 's/\(^[^# ]*[ ]*kernel.shmmax[ ]*=[ ]*\)\(.*\)/\1'${set_value_bytes}'/g' /etc/sysctl.conf write_as_property kernel_shmmax ${set_value_mb} fi } uninstall() { local doit="N" read -e -p "Are you sure you want to uninstall? 
[y/N]: " doit if [ "$doit" = "y" ] || [ "$doit" = "Y" ]; then (source ${scripts_dir}/esg-web-fe >& /dev/null && clean_web_fe_webapp_subsystem) (source ${scripts_dir}/esg-idp >& /dev/null && clean_idp_webapp_subsystem) (source ${scripts_dir}/esg-orp >& /dev/null && clean_orp_webapp_subsystem) (source ${scripts_dir}/esg-search >& /dev/null && clean_search_subsystem) (source ${scripts_dir}/esg-security >& /dev/null && clean_security_database_subsystem_installation) (source ${scripts_dir}/esg-desktop >& /dev/null && clean_desktop_webapp_subsystem) (source ${scripts_dir}/esg-dashboard >& /dev/null && clean_dashboard_webapp_subsystem && clean_dashboard_database_subsystem_installation) (source ${scripts_dir}/esg-node-manager >& /dev/null && clean_node_manager_webapp_subsystem && clean_node_manager_database_subsystem_installation) doit="N" if [ -e ${tomcat_install_dir}/webapps/thredds ]; then read -e -p "remove Thredds web service? (${tomcat_install_dir}/webapps/thredds) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then echo "removing ${tomcat_install_dir}/webapps/thredds" rm -rf ${tomcat_install_dir}/webapps/thredds [ $? != 0 ] && echo "ERROR: Unable to remove ${tomcat_install_dir}/webapps/thredds" fi fi doit="N" if [ -e $tomcat_install_dir ]; then read -e -p "remove apache tomcat? ($tomcat_install_dir) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then echo "removing $tomcat_install_dir" rm -ri $tomcat_install_dir [ $? != 0 ] && echo "ERROR: Unable to remove ${tomcat_install_dir}" fi fi doit="N" if [ -e $cdat_home ]; then read -e -p "remove cdat? ($cdat_home) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then echo "removing $cdat_home" rm -rf ${cdat_home} [ $? != 0 ] && echo "ERROR: Unable to remove ${cdat_home}" fi fi doit="N" if [ -e ${publisher_home}/${publisher_config} ]; then read -e -p "remove publisher configuration directory? (${publisher_home}) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then echo "removing ${publisher_home}" rm -rf ${publisher_home} [ $? != 0 ] && echo "ERROR: Unable to remove ${publisher_home}}" fi fi doit="N" if [ -e $postgress_install_dir ]; then read -e -p "remove postgress? ($postgress_install_dir) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then stop_postgress echo "removing $postgress_install_dir" rm -rf ${postgress_install_dir} [ $? != 0 ] && echo "ERROR: Unable to remove ${postgress_install_dir}" fi fi doit="N" if [ -e ${globus_location}/esg_${progname}_installed ] && (( ! no_globus )); then read -e -p "remove globus certs? ($globus_location) [y/N]: " doit if [ "doit" = "Y" ] || [ "$doit" = "y" ]; then echo "removing $globus_location" [ -n ${globus_location} ] && [ -e ${globus_location} ] && rm -rf ${globus_location} [ $? != 0 ] && echo "ERROR: Unable to remove ${globus_location}" fi fi # TODO Placeholder for compute tools and languages uninstall stop fi exit 0 } set_classpath() { local CP="." CP=${CP}:${node_manager_app_home}/WEB-INF/classes for i in $( ls ${node_manager_app_home}/WEB-INF/lib ); do CP=${CP}:${node_manager_app_home}/WEB-INF/lib/"$i" done CP=${CP}:${CATALINA_HOME}/lib/servlet-api.jar CLASSPATH=${CP} export CLASSPATH return 0; } install_bash_completion_file() { if (type rpm >& /dev/null) && [ ! -e "/etc/bash_completion" ]; then debug_print "Oh you should have bash completions... :-) " mkdir -p ${workdir} || return 1 pushd ${workdir} >& /dev/null checked_get ${bash_completion_url} (( $? > 1 )) && popd >& /dev/null && return 2 rpm -ivh ${bash_completion_url##*/} [ $? 
!= 0 ] && popd >& /dev/null && return 3 source /etc/bash_completion popd >& /dev/null fi [ -e "/etc/bash_completion" ] && \ [ ! -e "/etc/bash_completion.d/esg-node" ] && \ checked_get /etc/bash_completion.d/esg-node ${esg_dist_url}/esgf-installer/esg-node.completion } _set_no_auto_fetch_file() { touch ${esg_root_dir}/.no_auto_fetch_certs && chmod 600 ${esg_root_dir}/.no_auto_fetch_certs } _is_no_auto_fetch_certs_file_present() { [ -e ${esg_root_dir}/.no_auto_fetch_certs ] } _remove_no_auto_fetch_certs_file() { [ -e ${esg_root_dir}/.no_auto_fetch_certs ] && rm ${esg_root_dir}/.no_auto_fetch_certs || return 0 printf " ----------------------------------------------------------- BE ADVISED... The automatic loading of federation certificates has been suspended during THIS current instantiation of the ESGF node. If you wish to suspend auto loading of federation certificates beyond this instance please use the \"--set-auto-fetch-certs off\" flag. ----------------------------------------------------------- " } #------------------------------------------------------------------ #NOTE: this function DOES DEPEND on the format of the node manager's # registration.xml. If the format of the file changes, please # make sure this function is appropriately updated #------------------------------------------------------------------ # # Checks this node's group membership against the passed in target node # The default is to check against the default peer currently assigned # Returns true (0) if there IS an intersection, false (1) otherwise check_for_group_intersection_with() { [ -z ${node_peer_group} ] && get_property node_peer_group [ -z ${esgf_default_peer} ] && get_property esgf_default_peer [ -z ${esgf_idp_peer} ] && get_property esgf_idp_peer [ -z ${esgf_index_peer} ] && get_property esgf_index_peer local target_nodes=($@) (( 0 == ${#target_nodes[@]} )) && target_nodes=(${esgf_default_peer} ${esgf_idp_peer} ${esgf_index_peer}) echo local ret=0 local visited=() for target_node in ${target_nodes[@]}; do grep -q ${target_node} <<<${visited[@]} && debug_print " already visited ${target_node}" && continue local target_groups=$(curl -s http://${target_node}/esgf-node-manager/registration.xml | sed -n 's#.*nodePeerGroup="\([^"]*\)".*hostname="'${target_node}'".*#\1#p') if [ -z "${target_groups}" ]; then printf " Warning: Could not glean peer group membership from target peer (${target_node})\n" ((ret=ret+100)) continue fi #Histogram threshold approach... turns out to be easier on memory and faster than above... GO Bash/AWK!!! (snort laugh) #Thanks Estani, I should use the "here statements" more often just got used to 'cat <(echo ${foo})' instead of <<<${foo} local shared_groups=($(awk 'BEGIN {RS=",";ORS=" "} {a[$1]+=1} END {for (i in a) if (a[i] >1) print i}'<<<"${target_groups},${node_peer_group}")) #Let the user know what you've found... if ((${#shared_groups[@]} == 0)); then echo " WARNING: This node does not share any peer groups with default peer node $target_node :-(" echo " Please set a different default peer with: --set-default-peer " echo ((ret++)) else echo " This node shares the group(s) [$(echo ${shared_groups[@]} | sed 's# #, #g')] with ${target_node} ($( [ "${esgf_default_peer}" = "${target_node}" ] && echo "default ")$( [ "${esgf_idp_peer}" = "${target_node}" ] && echo "idp ")$( [ "${esgf_index_peer}" = "${target_node}" ] && echo "index")) $([OK])" fi visited=( ${visited[@]} ${target_node}) done echo return ${ret} } #Will return true (0) if compute type is determined to be installed. 
#Will return false (!=0) if compute type should NOT be installed sanity_check_for_hints_todo_compute() { debug_print "checking for compute hints..." local default_answer="n" local answer if $(sed -n '/las_configure[ ]*=[ ]*true/p' ${esg_root_dir}/config/esgcet/esg.ini 2> /dev/null | grep -q true); then echo "Detected that the publisher configuration indicates LAS functionality is needed." default_answer="Y" read -e -t 120 -p " Would you like to install the \"COMPUTE\" configuration to support this ? $([ ${default_answer} = "Y" ] && echo "[Y/n]" || echo "[y/N]")" answer [ -z "${answer}" ] && answer=$(tr 'A-Z' 'a-z' <<< ${default_answer}) ans2=$(tr 'A-Z' 'a-z' <<< ${answer}) answer=$ans2 if [ "${answer}" != "y" ] && [ "${answer}" != "yes" ]; then echo "Will NOT install \"COMPUTE\" configuration..." return 1 else echo "User wants \"COMPUTE\" node type to also be installed" return 0 fi else echo "No compute configuration hints found.... continuing..." return 2 fi return 3 } # Does an md5 check between local and remote resource # returns 0 (success) iff there is no match and thus indicating that # an update is available. # USAGE: checked_for_update [file] http://www.foo.com/file # check_for_update() { local local_file local remote_file if (( $# == 1 )); then remote_file=${1} local_file=$(readlink -f ${1##*/}) elif (( $# >= 2 )); then local_file=${1} remote_file=${2} else echo "function \"checked_for_update\": Called with incorrect number of args! (fatal)" exit 1 fi [ ! -e ${local_file} ] && echo " Hmmm... Could not find local file ${local_file}" && return 0 #[ ! -x ${local_file} ] && echo " Hmmm... local file ${local_file} not executible" && chmod 755 ${local_file} diff <(md5sum ${local_file} | tr -s " " | cut -d " " -f 1) <(curl -s -L --insecure ${remote_file}.md5 | tr -s " " | cut -d " " -f 1) >& /dev/null [ $? != 0 ] && echo " Update Available @ ${remote_file}" && return 0 echo " ==> ${local_file} is up to date" return 1 } # If an update is available then pull it down... then check the md5 sums again! # # Yes, this results in 3 network calls to pull down a file, but it # saves total bandwidth and it also allows the updating from the # network process to be cronttab-able while parsimonious with # resources. It is also very good practice to make sure that code # being executed is the RIGHT code! # # The 3rd token is the "force" flag value 1|0. # 1 = do not check for update, directly go and fetch the file regardless # 0 = first check for update availability. (default) # # The 4th token is for indicated whether a backup file should be made flag value 1|0. # 1 = yes, create a .bak file if the file is already there before fetching new # 0 = no, do NOT make a .bak file even if the file is already there, overwrite it # # (When using the force flag you MUST specify the first two args!!) # # NOTE: Has multiple return values test for (( $? > 1 )) when looking or errors # A return value of 1 only means that the file is up-to-date and there # Is no reason to fetch it. # # USAGE: checked_get [file] http://www.foo.com/file [<1|0>] [<1|0>] # checked_get() { local force_get=${3:-0} local make_backup_file=${4:-1} #default to make backup *.bak files if necessary local local_file local remote_file if (( $# == 1 )); then remote_file=${1} local_file=${1##*/} elif (( $# >= 2 )); then local_file=${1} remote_file=${2} else echo "function \"checked_get\": Called with incorrect number of args! 
(fatal) args[$@]" echo " usage: checked_get [] [force_get (0*|1)] [make_backup_file(0|1*)]" exit 1 fi if (_is_in_git "${local_file}") ; then printf "${local_file} is controlled by Git, not updating" return 0 fi if ((use_local_files)) && [ -e "${local_file}" ]; then printf " *************************************************************************** ALERT.... NOT FETCHING ANY ESGF UPDATES FROM DISTRIBUTION SERVER!!!! USING LOCAL FILE file: $(readlink -f ${local_file}) ***************************************************************************\n\n" return 0 fi if ((force_get == 0)); then check_for_update $@ [ $? != 0 ] && return 1 fi if [ -e ${local_file} ] && ((make_backup_file)) ; then cp -v ${local_file} ${local_file}.bak chmod 600 ${local_file}.bak fi echo "Fetching file from ${remote_file} -to-> ${local_file}" wget --no-check-certificate --progress=bar:force -O ${local_file} ${remote_file} [ $? != 0 ] && echo " ERROR: Problem pulling down [${remote_file##*/}] from esg distribution site" && return 2 diff <(md5sum ${local_file} | tr -s " " | cut -d " " -f 1) <(curl -s -L --insecure ${remote_file}.md5 |head -1| tr -s " " | cut -d " " -f 1) >& /dev/null [ $? != 0 ] && echo " WARNING: Could not verify file! ${local_file}" && return 3 echo "[VERIFIED]" return 0 } #backup() { #See esg-functions file } backup_db() { local my_node_db_name=${db_database} local my_schema_name echo -n "backup db: " while [ -n "$1" ]; do case ${1} in -s | --schema) shift [ "esgf" = "${1%%_*}" ] && my_schema_name="${1#*_}" || my_schema_name="${1}" ;; -db | --database) shift my_node_db_name=${1} ;; *) echo " Unknown flag [${1}]" echo " Valid flags: [-s|--schema] [-db|--database] " return 2 ;; esac shift done echo -n "database = [${my_node_db_name}] " [ -n "${my_schema_name}" ] && echo -n "schema = [esgf_${my_schema_name}]" echo mkdir -p ${esg_backup_dir} pushd ${esg_backup_dir} >& /dev/null start_postgress local backup_file #back up the schema specifically, if one is provided if [ -n "${my_schema_name}" ]; then my_schema_name=_${my_schema_name} backup_file_root="${my_node_db_name}_esgf${my_schema_name}_backup" echo "Backing up esgf schema esgf${my_schema_name} of ${my_node_db_name} -to-> ${backup_file_root}_$(date ${date_format}).sql.gz " debug_print "pg_dump -U ${postgress_user} --schema=esgf${my_schema_name} ${my_node_db_name} > ${my_node_db_name}_esgf${my_schema_name}_backup_$(date ${date_format}).sql.gz" PGPASSWORD=${PGPASSWORD:-${pg_sys_acct_passwd}} pg_dump -U ${postgress_user} --schema=esgf${my_schema_name} ${my_node_db_name} | gzip > ${my_node_db_name}_esgf${my_schema_name}_backup_$(date ${date_format}).sql.gz [ $? == 0 ] && [OK] || ([FAIL] && return 1) else backup_file_root="${my_node_db_name}_backup" echo "Backing up database: ${my_node_db_name} -to-> ${backup_file_root}_$(date ${date_format}).sql.gz " debug_print "pg_dump -U ${postgress_user} ${my_node_db_name} > ${my_node_db_name}_backup_$(date ${date_format}).sql.gz" PGPASSWORD=${PGPASSWORD:-${pg_sys_acct_passwd}} pg_dump -U ${postgress_user} ${my_node_db_name} | gzip > ${my_node_db_name}_backup_$(date ${date_format}).sql.gz [ $? == 0 ] && [OK] || ([FAIL] && return 1) fi #------------- #keep only the last num_backups_to_keep files local num_backups_to_keep=${num_backups_to_keep:-7} pushd ${esg_backup_dir} >& /dev/null local files=(`ls -t | grep ${backup_file_root}.\*.sql | tail -n +$((${num_backups_to_keep}+1)) | xargs`) if (( ${#files[@]} > 0 )); then echo "Tidying up a bit..." 
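        # Illustration (assuming the default num_backups_to_keep=7): "ls -t" lists newest first,
        # so "tail -n +$((7+1))" skips the 7 most recent ${backup_file_root}*.sql.gz files and
        # leaves only the older ones in ${files[@]}, which are removed just below.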
echo "${#files[@]} old backup files to remove: ${files[@]}" rm -v ${files[@]} fi popd >& /dev/null #------------- popd >& /dev/null return 0 } restore_nth_db_backup() { local my_node_db_name=${db_database} local my_schema_name local n_recent=1 echo -n "restore db: " while [ -n "$1" ]; do case ${1} in -s | --schema) shift my_schema_name=${1} ;; -db | --database) shift my_node_db_name=${1} ;; -n) shift n_recent=${1} ;; *) echo " Unknown flag [${1}]" echo " Valid flags: [-s|--schema] [-db|--database] -n " return 2 ;; esac shift done [ ! -d ${esg_backup_dir} ] && echo " ERROR: cannot locate backup directory [${esg_backup_dir}]" && return 1 local n_total echo -n "database = [${my_node_db_name}] " && n_total=$(ls -l ${esg_backup_dir} | grep ${my_node_db_name}_backup_ | wc -l) [ -n "${my_schema_name}" ] && echo -n "schema = [${my_schema_name}]" && n_total=$(ls -l ${esg_backup_dir} | grep ${my_node_db_name}_esgf_${my_schema_name}_backup_ | wc -l) echo ((n_recent > n_total)) && printf "\n Sorry, there are not ${n_recent} backups present, only ${n_total} are available\n\n" && return 2 local default_answer="Y" #always be safe. read -e -p "Would you like to first make a back up of the existing database? [Y/n] " answer [ -n "${answer}" ] && answer=$(echo ${answer} | tr 'a-z' 'A-Z') || answer=${default_answer} [ "${answer}" = "Y" ] && backup_db -db ${my_node_db_name} -s ${my_schema_name} && ((n_recent++)) start_postgress pushd ${esg_backup_dir} >& /dev/null local backup_file #back up the schema specifically, if one is provided if [ -n "${my_schema_name}" ]; then my_schema_name=_${my_schema_name} backup_file=$(ls -rt | grep ${my_node_db_name}_esgf${my_schema_name}_backup_ | tail -n ${n_recent} | head -n 1) echo "Restoring backup esgf schema esgf${my_schema_name} of ${my_node_db_name} <-from- ${backup_file}" debug_print "psql -U ${postgress_user} ${my_node_db_name} < ${backup_file}" PGPASSWORD=${PGPASSWORD:-${pg_sys_acct_passwd}} psql -U ${postgress_user} ${my_node_db_name} < <(zcat ${backup_file}) [ $? == 0 ] && echo " $([OK]) " || (echo " $([FAIL]) " && return 1) else backup_file=$(ls -rt | grep ${my_node_db_name}_backup_ | tail -n ${n_recent} | head -n 1) echo "Restoring backup ${my_node_db_name} <-from- ${backup_file}" debug_print "psql -U ${postgress_user} ${my_node_db_name} < ${backup_file}" PGPASSWORD=${PGPASSWORD:-${pg_sys_acct_passwd}} psql -U ${postgress_user} ${my_node_db_name} < <(zcat ${backup_file}) [ $? == 0 ] && [OK] || ([FAIL] && return 1) fi popd >& /dev/null return 0 } #configures the database i.e. sets up table schema #based on the the node type. config_db() { init local let mysel=${1:-${sel:-4}} ((DEBUG)) && echo "Database configuration... (selection = ${mysel})" #------- #IMPORTANT! The SUM of all the handled types is the real max value #here! (else can't decrement down to 0 and get into an infinite loop) local myMAX_BIT=$((DATA_BIT+IDP_BIT)) ((mysel > myMAX_BIT)) && echo -n "Adjusting selection to bounds... " && ((mysel=myMAX_BIT)) && echo "(selection = ${mysel})" #------- echo echo "*******************************" echo -n "Configuring Postgres... 
" && show_type echo "*******************************" echo start_postgress #------------------------------------------------------------------------ #Based on the node type selection we build the appropriate database tables #------------------------------------------------------------------------ while (( (mysel >= MIN_BIT) && (mysel <= myMAX_BIT) )); do ((DEBUG)) && echo " mysel = ${mysel} : " if (( (mysel & DATA_BIT) != 0 )); then echo "Setting up database for DATA node type" source ${scripts_dir}/esg-node-manager >& /dev/null && configure_postgress ${sel} ((mysel-=DATA_BIT)) elif (( (mysel & IDP_BIT) != 0 )); then echo "Setting up database for IDP node type" source ${scripts_dir}/esg-security >& /dev/null && configure_postgress ${sel} ((mysel-=IDP_BIT)) fi echo done ((DEBUG)) && echo "*mysel = ${mysel}" popd >& /dev/null echo echo checked_done 0 } #Replace a pattern inside the target file with the contents of the input file insert_file_at_pattern() { local target_file=$1 local input_file=$2 local pattern=$3 echo "Inserting into ${target_file} <- ${input_file} at pattern ${pattern}" python -c "infile = '${target_file}';filterfile = '${input_file}';pattern='${pattern}';f=open(infile);s=f.read();f.close();f=open(filterfile);filter = f.read();f.close();s=s.replace(pattern,filter);f=open(infile,'w');f.write(s);f.close()" ret=$? [ $ret != 0 ] && echo "Problem in function insert_file_at_pattern in $0" return ${ret} } ### "private" functions ### _readlinkf() { # This is a portable implementation of GNU's "readlink -f" in # bash/zsh, following symlinks recursively until they end in a # file, and will print the full dereferenced path of the specified # file even if the file isn't a symlink. # # Loop detection exists, but only as an abort after passing a # maximum length. local start_dir=$(pwd) local file=${1} cd $(dirname ${file}) file=$(basename ${file}) # Iterate down symlinks. If we exceed a maximum number symlinks, assume that # we're looped and die horribly. local maxlinks=20 local count=0 local current_dir while [ -L "${file}" ] ; do file=$(readlink ${file}) cd $(dirname ${file}) file=$(basename ${file}) ((count++)) if (( count > maxlinks )) ; then current_dir=$(pwd -P) echo "CRITICAL FAILURE[4]: symlink loop detected on ${current_dir}/${file}" cd ${start_dir} exit ${count} fi done current_dir=$(pwd -P) echo "${current_dir}/${file}" cd ${start_dir} } _is_in_git() { # This determines if a specified file is in a git repository. # This function will resolve symlinks and check for a .git # directory in the directory of the actual file as well as its # parent to avoid attempting to call git unless absolutely needed, # so as to be able to detect some common cases on a system without # git actually installed and in the path. # # Accepts as an argument the file to be checked # # Returns 0 if the specified file is in a git repository # # Returns 2 if it could not detect a git repository purely by file # position and git was not available to complete a rev-parse test # # Returns 1 otherwise debug_print "DEBUG: Checking to see if ${1} is in a git repository..." REALDIR=$(dirname $(_readlinkf ${1})) GITEXEC=`which git` if [ ! 
-e $1 ] ; then debug_print "DEBUG: ${1} does not exist yet, allowing creation" return 1 fi if [ -d "${REALDIR}/.git" ] ; then debug_print "DEBUG: ${1} is in a git repository" return 0 fi if [ -d "${REALDIR}/../.git" ] ; then debug_print "DEBUG: ${1} is in a git repository" return 0 fi if [ -z ${GITEXEC} ] ; then debug_print "DEBUG: git is not available to finish checking for a repository -- assuming there isn't one!" return 2 fi if $(cd ${REALDIR} && git rev-parse 2>/dev/null) ; then debug_print "DEBUG: ${1} is in a git repository" return 0 fi return 1 } _verify() { echo "diff <(md5sum ${0} | tr -s " " | cut -d " " -f 1) <(curl ${1}/${0##*/}.md5 | tr -s " " | cut -d " " -f 1) >& /dev/null " diff <(md5sum ${0} | tr -s " " | cut -d " " -f 1) <(curl -s -L --insecure ${1}/${0##*/}.md5 | tr -s " " | cut -d " " -f 1) >& /dev/null [ $? != 0 ] && return 3 echo "[VERIFIED]" return 0 } self_verify() { # Test to see if the esg-node script is currently being pulled from git, and if so skip verification if (_is_in_git ${0}) ; then echo "Git repository detected; not checking checksum of esg-node" return 0 fi [ -z "$(echo ${script_version} | sed -n 's/-devel/\0/p')" ] && devel=0 || devel=1 _verify ${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "")/esgf-installer/$script_maj_version >& /dev/null local ret=$? if (( ${ret} == 3 )); then printf "WARNING: $0 could not be verified!! \n(This file, ${0}, may have been tampered with or there is a newer version posted at the distribution server.\nPlease update this script.)\n\n" local choice="x" if [ "$#" == 1 ]; then debug_print "Operation $1 was chosen" choice=$1 else local input="" read -e -t $((1*60)) -p "Do you wish to Update and exit [u], continue anyway [c] or simply exit [x]? [u/c/X]: " input [ -n "${input}" ] && choice=${input} unset input fi case ${choice} in c | C) echo "Continuing..." echo return $ret ;; u | U | --update | update) echo "Updating local script with script from distribution server..." /usr/local/bin/esg-bootstrap $( ((devel == 1)) && echo "--devel" || echo "") local update_ret=$? ((update_ret == 0)) && checked_get ${esg_functions_file} ${esg_dist_url}/esgf-installer/${script_maj_version}/${esg_functions_file##*/} && echo "(w/ functions file)" echo "Please re-run this updated script $0" echo exit $update_ret ;; x | X) echo "Exiting..." echo exit 1 ;; *) echo "Unknown option: [$choice] - Exiting" exit 1 ;; esac fi return 0 } #Helper method for reading the last state of node type config from config dir file "config_type" #Every successful, explicit call to --type|-t gets recorded in the "config_type" file #If the configuration type is not explicity set the value is read from this file. read_sel() { debug_print "read_sel ${sel}" local let mysel=${sel} debug_print "mysel = ${mysel}" #If mysel (our private copy of sel) has not bits in the type bits range then read the config_type file #and set us to whatever value we currently add plus that one. In the case of installation or testing #those values are below the range so they shoudl not be looked at for type selection though they may be #set. However whatever we have for the type selection should be added to them for compound selection. if (( (mysel < MIN_BIT) || (mysel > MAX_BIT) )); then debug_print "Out of Range ${mysel} < ${MIN_BIT} || ${mysel} > ${MAX_BIT}" local last_config_type=$(cat ${esg_config_type_file} 2> /dev/null) ((mysel+= ${last_config_type:-0})) debug_print "mysel is now: ${mysel}" fi ((mysel == 0)) && \ printf "ERROR: No node type selected nor available! 
\n Consult usage with --help flag... look for the \"--type\" flag \n(must come BEFORE \"[start|stop|restart|update]\" args)\n\n" debug_print "Setting sel [${sel}] to mysel [${mysel}]" sel=${mysel} } #Write the node type numeric value to file #(Yes... gratuitous error and bounds checking) set_sel() { local new_sel=${1} debug_print "new_sel = $new_sel" local type_bits=0 local hit_bits=0 #valididty check for type... in range power of 2 #MIN and MAX BIT range... if so then valid and an be written down. if ((new_sel > $MAX_BIT || new_sel < $MIN_BIT)); then debug_print "WARNING: Selection [$1] is out of range $MIN_BIT - $MAX_BIT" fi #Check if the new sel has any bits turned on in the range of our type bits for ((type_bit=$MIN_BIT;type_bit<=$MAX_BIT;type_bit*=2)) ; do (( (new_sel & type_bit) != 0 )) && ((hit_bits+=type_bit)) done debug_print "[hit_bits = $hit_bits] =? [new_sel = $new_sel]" if((hit_bits)); then echo "${hit_bits}" > ${esg_config_type_file} ((DEBUG)) && cat ${esg_config_type_file} fi } show_svc_list() { id | grep root >& /dev/null [ $? != 0 ] && echo "(display of node process list limited to root)" && return 0 echo echo "---------------------------" echo "$(echo_strong Running Node Services...) "; ((sel != 0)) && show_type || echo "" echo "---------------------------" local command="lsof -Pni |egrep 'postgres|jsvc|globus-gr|java|myproxy' $(((! DEBUG)) && echo "| grep -v \(CLOSE_WAIT\)") $(((! VERBOSE)) && echo "|grep \(LISTEN\)")" eval ${command} lsof -Pni |egrep httpd | head -n1 local dashboard_pid=$(pgrep dashboard) [ -n "${dashboard_pid}" ] && echo "esgf-dash ${dashboard_pid}" echo "---------------------------" echo return 0 } #------------------------------- # ESG Node Life Cycle Functions (start/stop/status) #------------------------------- #Run commands that need to be done to set the stage before any of the aparatus begins their launch sequence. #Here is a good place to; copy files, set configurations, etc... BEFORE you start running and need them! #In the submodule script the convention is _startup_hook() (where module does not contain esg(f) prefix) run_startup_hooks() { echo_strong "Running startup hooks..." setup_sensible_confs ( esgcet_startup_hook ) ( tds_startup_hook ) (source ${scripts_dir}/esg-orp >& /dev/null && orp_startup_hook) (source ${scripts_dir}/esg-security >& /dev/null && security_startup_hook) (source ${scripts_dir}/esg-web-fe >& /dev/null && web_fe_startup_hook) # (source ${scripts_dir}/esg-desktop >& /dev/null && desktop_startup_hook) (source ${scripts_dir}/esg-search >& /dev/null && search_startup_hook) #------------------------------------------ #When starting up pull down necessary federation certificates #Default is to fetch them... unless explictly set otherwise (see --set-auto-fetch-certs flag) get_property node_auto_fetch_certs [ -z "${node_auto_fetch_certs}" ] && node_auto_fetch_certs="true" && write_as_property node_auto_fetch_certs if [ "${node_auto_fetch_certs}" = "true" ] && ! _is_no_auto_fetch_certs_file_present ; then echo -n "Fetching federation certificates... " && fetch_esgf_certificates >& /dev/null && [OK] || [FAIL] echo -n "Fetching federation truststore..... " && fetch_esgf_truststore >& /dev/null && [OK] || [FAIL] fi _is_no_auto_fetch_certs_file_present && _remove_no_auto_fetch_certs_file #------------------------------------------ } #Any last bits of cleanup for your module should be called here. 
#As with the startup hooks the function convention is _shutdown_hook() (where module does not contain esg(f) prefix) run_shutdown_hooks() { echo_strong "Running shutdown hooks..." } #Starts the esg node start() { local let sel=${1:-${sel:-0}} debug_print "starting services... ($1)" read_sel [ $((sel & ALL_BIT)) == 0 ] && echo "Cannot Start: No Node Type Specified! (See --help for info on the --type|-t option) :-|" && exit 1 echo "$(echo_strong "starting services...") ($sel)" run_startup_hooks /etc/init.d/ntpd status if [ $? != 0 ]; then /etc/init.d/ntpd start fi [ $((sel & ALL_BIT)) != 0 ] && start_postgress [ $((sel & ALL_BIT)) != 0 ] && start_tomcat [ $((sel & ALL_BIT)) != 0 ] && (source ${scripts_dir}/esg-dashboard >& /dev/null && start_dashboard_services) source $esgf_nm_funcs_file && nm_start (( ! no_globus )) && [ $((sel & DATA_BIT+IDP_BIT)) != 0 ] && start_globus ${sel} ${gridftp_config} [ $((sel & INDEX_BIT)) != 0 ] && (source ${scripts_dir}/esg-search >& /dev/null && start_search_services ${index_config}) sleep 3 /etc/init.d/esgf-httpd status #/usr/sbin/apachectl status if [ $? != 0 ]; then /etc/init.d/esgf-httpd start #echo 'Executing: /usr/sbin/apachectl -f /etc/httpd/conf/esgf-httpd.conf -k start' #/usr/sbin/apachectl -f /etc/httpd/conf/esgf-httpd.conf -k start fi show_svc_list set_sel "${sel}" echo } #Stops the esg node stop() { local sel=${1:-${sel:-$ALL_BIT}} read_sel (source ${scripts_dir}/esg-search >& /dev/null && stop_search_services) stop_globus $(( sel == 0 ? ALL_BIT : sel)) ${gridftp_config} (source ${scripts_dir}/esg-dashboard >& /dev/null && stop_dashboard_services) source $esgf_nm_funcs_file && nm_stop stop_tomcat stop_postgress #/usr/sbin/apachectl stop /etc/init.d/esgf-httpd stop run_shutdown_hooks show_svc_list } #Displays the status of the node... status() { #TODO conditionally reflect the status of globus (gridftp) process read_sel local ret=1 if check_postgress_process && check_tomcat_process && check_esgf_httpd_process; then echo echo "$(echo_strong "Node Running...") (${sel})" ret=0 else echo echo "$(echo_strong "Stopped:") At least one process not running" ret=1 fi source $esgf_nm_funcs_file && nm_status #This is here for sanity checking... show_svc_list return ${ret} } initial_setup_questionnaire() { echo echo "-------------------------------------------------------" echo 'Welcome to the ESGF Node installation program! :-)' echo mkdir -p ${esg_config_dir} pushd ${esg_config_dir} >& /dev/null local input local default get_property esgf_host ${esgf_host} if [ -z "${esgf_host}" ] || ((force_install)); then unset input default=${esgf_host:-$(hostname --fqdn)} defaultdomain=$(echo ${default} | cut -f 2- -d '.' -s) if [ X"${default}" = "X" ] ; then default="localhost.localdomain" elif [ X"${defaultdomain}" = "X" ] ; then default="${default}.localdomain" fi read -e -p "What is the fully qualified domain name of this node? [${default}]: " input [ -z "${input}" ] && esgf_host=${default} && write_as_property esgf_host [ -n "${input}" ] && esgf_host=${input} && write_as_property esgf_host else echo "esgf_host = [${esgf_host}]" write_as_property esgf_host fi unset input unset default security_admin_password=$(cat ${esgf_secret_file} 2> /dev/null) if [ -z "${security_admin_password}" ] || ((force_install)); then while [ 1 ]; do unset input read -e -s -p "What is the admin password to use for this installation? 
(alpha-numeric only) [$([ -n "${security_admin_password}" ] && echo "*******")]: " input ((force_install)) && [ -z "${input}" ] && [ -n "${security_admin_password}" ] && changed=0 && echo && break ( [ -z "${input}" ] || [ -n "$(echo ${input} | sed -n -e 's/[a-zA-Z0-9]*//p')" ] ) && echo "Invalid password... " && continue ( [ -z "${input}" ] || (( ${#input} < 6 )) ) && echo "Sorry password must be at least six characters :-( " && continue [ -n "${input}" ] && security_admin_password=${input} && changed=1 echo read -e -s -p "Please re-enter password: " verify_password if [ "${security_admin_password}" = "${verify_password}" ] ; then echo changed=1 break else echo "Sorry, values did not match" echo changed=0 fi done unset verify_password unset input ((changed==1)) && echo ${security_admin_password} > ${esgf_secret_file} chmod 640 ${esgf_secret_file} if [ ! $(getent group ${tomcat_group}) ]; then /usr/sbin/groupadd -r ${tomcat_group} [ $? != 0 ] && [ $? != 9 ] && echo "ERROR: *Could not add tomcat system group: ${tomcat_group}" && popd && checked_done 1 fi chown ${installer_uid}:${tomcat_group} ${esgf_secret_file} #Use the same password when creating the postgress account ((changed==1)) && pg_sys_acct_passwd=${security_admin_password} fi [ -e "${esgf_secret_file}" ] && chmod 640 ${esgf_secret_file} && chown ${installer_uid}:${tomcat_group} ${esgf_secret_file} [ ! -e "${pg_secret_file}" ] && echo "${pg_sys_acct_passwd}" > ${pg_secret_file} [ -e "${pg_secret_file}" ] && chmod 640 ${pg_secret_file} && chown ${installer_uid}:${tomcat_group} ${pg_secret_file} unset input get_property esg_root_id ${esg_root_id} if [ -z "${esg_root_id}" ] || ((force_install)); then while [ 1 ]; do unset input default=$(echo `hostname -s`.`hostname --domain` | awk -F. ' {print $(NF-1)} ') read -e -p "What is the name of your organization? [${default}]: " input [ -z "${input}" ] && esg_root_id=${default} && write_as_property esg_root_id && break [ -n "${input}" ] && esg_root_id=$(echo ${input} | sed 's/ /_/g') && write_as_property esg_root_id && break done else echo "esg_root_id = [${esg_root_id}]" fi unset input get_property node_short_name ${node_short_name} if [ -z "${node_short_name}" ] || ((force_install)); then while [ 1 ]; do unset input read -e -p "Please give this node a \"short\" name: [${node_short_name}]: " input [ -z "${input}" ] && [ -n "${node_short_name}" ] && input=${node_short_name} && break [ -n "${input}" ] && node_short_name=$(echo ${input} | sed 's/ /_/g') && write_as_property node_short_name && break done else echo "node_short_name = [${node_short_name}]" fi unset input get_property node_long_name ${node_long_name} if [ -z "${node_long_name}" ] || ((force_install)); then unset input read -e -p "Please give this node a more descriptive \"long\" name [${node_long_name}]: " input [ -n "${input}" ] && node_long_name="${input}" && write_as_property node_long_name else echo "node_long_name = [${node_long_name}]" fi get_property node_namespace if [ -z "${node_namespace}" ] || ((force_install)); then local node_namespace_=${node_namespace:-$(hostname --fqdn | sed -n 's/[^.]*\(.*\)/\1/p' | tac -s "." | xargs | sed -e 's# #.#g' | sed 's/\(.*\)[^ ]./\1/')} while [ 1 ]; do unset input read -e -p "What is the namespace to use for this node? 
(set to your reverse fqdn - Ex: \"gov.llnl\") [${node_namespace_}]: " input [ -z "${input}" ] && [ -n "${node_namespace_}" ] && write_as_property node_namespace ${node_namespace_} && break [ -n "${input}" ] && node_namespace=${input} && write_as_property node_namespace && break done else echo "node_namespace = [${node_namespace}]" fi get_property node_peer_group if [ -z "${node_peer_group}" ] || ((force_install)); then local node_peer_group_=${node_peer_group:-"esgf-test"} while [ 1 ]; do unset input read -e -p "What peer group(s) will this node participate in? (esgf-test|esgf-prod) [${node_peer_group_}]: " input [ -z "${input}" ] && [ -n "${node_peer_group_}" ] && write_as_property node_peer_group ${node_peer_group_} && break [ -n "${input}" ] && node_peer_group=${input} && write_as_property node_peer_group && break done else echo "node_peer_group = [${node_peer_group}]" fi unset input get_property esgf_default_peer ${esgf_default_peer} if [ -z "${esgf_default_peer}" ] || ((force_install)); then unset input default=${esgf_default_peer:-${esgf_host:-$(hostname --fqdn)}} read -e -p "What is the default peer to this node? [${default}]: " input [ -z "${input}" ] && esgf_default_peer=${default} && write_as_property esgf_default_peer [ -n "${input}" ] && esgf_default_peer=${input} && write_as_property esgf_default_peer else echo "esgf_default_peer = [${esgf_default_peer}]" fi unset input unset default get_property esgf_index_peer ${esgf_index_peer} if [ -z "${esgf_index_peer}" ] || ((force_install)); then unset input default=${esgf_default_peer:-${esgf_host:-$(hostname --fqdn)}} read -e -p "What is the hostname of the node do you plan to publish to? [${default}]: " input [ -z "${input}" ] && esgf_index_peer=${default} && write_as_property esgf_index_peer [ -n "${input}" ] && esgf_index_peer=${input} && write_as_property esgf_index_peer else echo "esgf_index_peer = [${esgf_index_peer}]" fi unset input unset default get_property mail_admin_address ${mail_admin_address} if [ -z "${mail_admin_address}" ] || ((force_install)); then read -e -p "What email address should notifications be sent as? [${mail_admin_address}]: " input [ -n "${input}" ] && mail_admin_address=${input} && write_as_property mail_admin_address [ -z "${mail_admin_address}" ] && echo " (The notification system will not be enabled without an email address)" else echo "mail_admin_address = [${mail_admin_address}]" fi unset input get_property db_user ${db_user} get_property db_host ${db_host} get_property db_port ${db_port} get_property db_database ${db_database} get_property db_managed ${db_managed} if [ -z "${db_user}" ] || [ -z "${db_host}" ] || [ -z "${db_port}" ] || [ -z "${db_database}" ] || [ -z "${db_managed}" ] || ((force_install)); then _is_managed_db _get_db_conn_str_questionnaire else echo "db connection string = [postgresql://${db_user}@$([ -n "${db_host}" ] && ([ "${db_host}" = "${esgf_host}" ] || [ "${db_host}" = "localhost" ]) && echo "localhost" || echo ${db_host}):${db_port}/${db_database}] [external = ${db_managed}]" fi #-------------------------------- local input local default get_property publisher_db_user ${publisher_db_user} if [ -z "${publisher_db_user}" ] || ((force_install)); then unset input default=${publisher_db_user:-'esgcet'} read -e -p "What is the (low priv) db account for publisher? 
[${default}]: " input [ -z "${input}" ] && publisher_db_user=${default} && write_as_property publisher_db_user [ -n "${input}" ] && publisher_db_user=${input} && write_as_property publisher_db_user else echo "publisher_db_user = [${publisher_db_user}]" fi unset input unset default if [ -z "${publisher_db_user_passwd}" ] || ((force_install)); then read -e -s -p "What is the db password for publisher user (${publisher_db_user})? [$([ -n "${publisher_db_user_passwd}" ] && echo "*******")]: " input if [ -n "${input}" ]; then publisher_db_user_passwd=${input} echo "${publisher_db_user_passwd}" > ${pub_secret_file} fi fi [ ! -e "${pub_secret_file}" ] && echo "${publisher_db_user_passwd}" > ${pub_secret_file} [ -e "${pub_secret_file}" ] && chmod 640 ${pub_secret_file} && chown ${installer_uid}:${tomcat_group} ${pub_secret_file} echo "db publisher connection string = [postgresql://${publisher_db_user}@$([ -n "${db_host}" ] && ([ "${db_host}" = "${esgf_host}" ] || [ "${db_host}" = "localhost" ]) && echo "localhost" || echo ${db_host}):${db_port}/${db_database}]" #-------------------------------- dedup_properties ${config_file} echo "-------------------------------------------------------" echo echo popd >& /dev/null return 0 } _get_db_conn_str_questionnaire() { #postgresql://esgcet@localhost:5432/esgcet local ret=1 local user_ host_ port_ dbname_ connstring_ #Note the values referenced here should have been set by prior get_property *** calls #that sets these values in the script scope. (see the call in questionnaire function - above) if [ -n "${db_user}" ] && [ -n "${db_host}" ] && [ -n "${db_port}" ] && [ -n "${db_database}" ]; then connstring_="${db_user}@$([ -n "${db_host}" ] && ([ "${db_host}" = "${esgf_host}" ] || [ "${db_host}" = "localhost" ]) && echo "localhost" || echo ${db_host}):${db_port}/${db_database}" fi while [ 1 ]; do local input echo "Please enter the database connection string..." echo " (form: postgresql://[username]@[host]:[port]/esgcet)" get_property db_managed #(if it is a not a force install and we are using a LOCAL (NOT MANAGED) database then db_managed == "no") if [ -z "${connstring_}" ] && [ "${db_managed}" != "yes" ] && ((! force_install)) ; then connstring_="dbsuper@localhost:5432/esgcet" fi read -e -p "What is the database connection string? [postgresql://${connstring_}]: postgresql://" input [ -z "${input}" ] && input="${connstring_}" echo "entered: postgresql://${input}" [ -z "${input}" ] && continue eval $(sed -n 's#postgresql://\([^@]*\)@\([^:]*\):\([^/]*\)/\(esgcet\)[ ]*$#user_=\1 host_=\2 port_=\3 dbname_=\4#p' <(echo "postgresql://${input}")) if [ -z "${user_}" ] || \ [ -z "${host_}" ] || \ [ -z "${port_}" ] || \ [ -z "${dbname_}" ]; then echo "ERROR: Incorrect connection string syntax or values" ret=1 else ret=0 break fi done debug_print "user = $user_" debug_print "host = $host_" debug_print "port = $port_" debug_print "database = $dbname_" #set vars... 
db_user=${user_} db_host=$([ -n "${host_}" ] && ([ "${host_}" = "${esgf_host}" ] || [ "${host_}" = "localhost" ]) && echo "localhost" || echo "${host_}") db_port=${port_} db_database=${dbname_} #write vars to property file write_as_property db_user write_as_property db_host write_as_property db_port write_as_property db_database debug_print "ret = $ret" return $ret } ############################################ # Main ############################################ esgf_node_info() { printf " The goal of this script is to automate as many tasks as possible regarding the installation, maintenance and use of the ESGF software stack that is know as the \"ESGF Node\". A software stack is a collection of tools that work in concert to perform a particular task or set of tasks that are semantically united. The software stack is comprised of: Tomcat, Thredds, CDAT & CDMS, PostgreSQL, MyProxy, and several ESGF.org custom software applications running on a LINUX (RedHat/CentOS) operating system. Through the installation process there are different accounts that are created that facilitate the communication between the software stack entities. These credentials are internal to the stack. It is recommended that you use the defaults provided throughout this installation. The security impact with regards to the visibility and accessibility of the constituent components of the stack depends on other factors to be addressed by your organization. Please be sure that you have gotten your created an account on your ESGF IDP Peer. The primary IDP Peer for ESGF is esgf-node.llnl.gov You may register for an account at LLNL at the following URL: https://esgf-node.llnl.gov/user/add/ Note: Account creation is prerequisite for publication! ESGF P2P Node: ESGF P2P Node: --------- --------- |Tomcat | |Tomcat | | | <================= P2P =================> | | |-Thredds | |-Thredds | |-ORP | |-ORP | |---------| |---------| |CDAT/CDMS| |CDAT/CDMS| |---------| |---------| |Postgres | |Postgres | |---------| |---------| | MyProxy | <===(HTTPS)===> [ESGF Peer Node(s)]* | MyProxy | |---------| |---------| | GridFTP | <=============> [End User(s)]* | GridFTP | >---------< >---------< | CentOS | | CentOS | |(Virtual)| |(Virtual)| | Machine | | Machine | |---------| |---------| --------- --------- (Visit http://esgf.llnl.gov , http://github.com/ESGF/esgf.github.io/wiki for more information) \033[01;31m EEEEEEEEEEEEEEEEEEEEEE SSSSSSSSSSSSSSS GGGGGGGGGGGGGFFFFFFFFFFFFFFFFFFFFFF E::::::::::::::::::::E SS:::::::::::::::S GGG::::::::::::GF::::::::::::::::::::F E::::::::::::::::::::ES:::::SSSSSS::::::S GG:::::::::::::::GF::::::::::::::::::::F EE::::::EEEEEEEEE::::ES:::::S SSSSSSS G:::::GGGGGGGG::::GFF::::::FFFFFFFFF::::F E:::::E EEEEEES:::::S G:::::G GGGGGG F:::::F FFFFFF\033[0m \033[01;33m E:::::E S:::::S G:::::G F:::::F E::::::EEEEEEEEEE S::::SSSS G:::::G F::::::FFFFFFFFFF E:::::::::::::::E SS::::::SSSSS G:::::G GGGGGGGGGG F:::::::::::::::F E:::::::::::::::E SSS::::::::SS G:::::G G::::::::G F:::::::::::::::F E::::::EEEEEEEEEE SSSSSS::::S G:::::G GGGGG::::G F::::::FFFFFFFFFF\033[0m \033[01;32m E:::::E S:::::SG:::::G G::::G F:::::F E:::::E EEEEEE S:::::S G:::::G G::::G F:::::F EE::::::EEEEEEEE:::::ESSSSSSS S:::::S G:::::GGGGGGGG::::GFF:::::::FF E::::::::::::::::::::ES::::::SSSSSS:::::S GG:::::::::::::::GF::::::::FF E::::::::::::::::::::ES:::::::::::::::SS GGG::::::GGG:::GF::::::::FF EEEEEEEEEEEEEEEEEEEEEE SSSSSSSSSSSSSSS GGGGGG GGGGFFFFFFFFFFF.org \033[0m -ESGF.org \n\n" } usage() { printf " usage: * This program must be run as root 
or a root equiv shell: (sudo -s) * Deprecated flags are in $(echo_fail red), and followed by an \"*\". ${progname} ([--] | [start] | [stop] | [status] | [restart] --install - goes through the installation process will automatically start up node services --verify - runs the test code to verify installation --write-env - writes the necessary env vars to file ${envfile} --version - indicates the version of this script --check - checks if this script is the most up-to-date posted (Update|Continue|eXit) --clear - removes the file holding the enironment state of last install --test-pub - performs the publication test directly (same publication called in last step of install) --info - provides a brief explaination of the DataNode --upgrade|--update|update - upgrades/updates the node manager --force - will allow execution of installation|update code to be executed beyond the up2date checks. Basically allowing an installation|update as if starting from scratch. $(echo_fail --prefix)* - specify the top level directory for this entire installation's \"external\" tools (default:/usr/local - currently:$install_prefix) see: \"Key Environment Vars\" section below $(echo_fail --workdir)* - specify the directory used by the installation to download and build esg artifacts for installation (default:~/workbench/esg - currently:$workdir) see: \"Key Environment Vars\" section below ------------------------------- Configuration Type Selection ------------------------------- --type | -t - Select type of node: \"data\" node, \"index\" node, \"idp\" (identity provider) node and/or \"compute\" node (note: there is no default one must be selected at least initially... when in doubt use \"data\") --set-type - Sets the type value to be used at next start up --get-type - returns the last stored type code value of the last run node configuration (data=4 +| index=8 +| idp=16) (note: (s|g)et-type flags are singular command flags, meaning they are NOT meant to work with other flags on the same command line!) ------------------------------- Federation / Node Relationship Flags: ------------------------------- --set-default-peer --get-default-peer - tells you this node's currently configured default peer --set-peer-group --get-peer-group - tells you this node's currenly configured peer group(s) --federation-sanity-check - tells you if your node's configured peer groups intersect with default peer --spotcheck - provides basic federation mesh (inter-connectivity) information. (see esgf-spotcheck script directly) ------------------------------- Security Policy... ------------------------------- --policy-check - returns the list of policies that are triggered by the provided resource ------------------------------- Index (search) Utils... ------------------------------- --optimize-index [--force] - optimizes search index for \"index\" nodes (must be called local to index node) --crawl - ingest remote catalogs and their descendants into local index for searching. 
--add-replica-shard [:] - creates a local replica index of the index at the given mapping to local --remove-replica-shard [:] - removes the local replica index bound to named by --list-shards - shows list of registered local replica indexes --init-shards - adds all shards listed in the replica shard configuration file --check-shards - checks the configuration for local replica indexes --time-shards - cycles through all known index shards providing query times for each [-show-local] will show timing information for locally replicated shards [-timeout ] will set the connection timeout value (in seconds) --update-publisher-resources - update local repository of publication schemas et al. ------------------------------- Publication Endpoint Setting (Index Peer) ------------------------------- --set-index-peer Ex: pcmdi.llnl.gov --get-index-peer - tells you where this node is indexing it's data ------------------------------- IDP Endpoint Setting (IDP Peer) ------------------------------- --set-idp-peer | --set-admin-peer [make selection from menu or input information for idp peer you wish to present credentials to] --get-idp-peer - tells you where this node is authenticating against. ------------------------------- Globus Tool Management ------------------------------- --no-globus - will not perform any operations affecting globus tools (for those with existing globus setups) --gridftp-config - [ bdm &| end-user ] (defaults to both) --myproxy - [gen-self-cert] | [regen-simpleca] [fetch-certs|gen-self-cert|keep-certs] | [install|update] ------------------------------- Key Management Flags: ------------------------------- --register - connects to desired node, fetches and stores their certificate to enable ingress SSL connections --migrate-tomcat-credentials-to-esgf - moves credentials and supporting security files under ${esg_config_dir} --generate-ssl-key-and-csr - generate new key and cert request files [] (The 'dn' value is of the form - Ex:'/O=ESG/OU=ESGF.ORG/CN=${esgf_host:-$(hostname --fqdn)}') command> $(echo_c -fg magenta "esg-node --generate-ssl-key-and-csr ${tomcat_conf_dir}/${esgf_host:-$(hostname --fqdn)}-esg-node.csr ${tomcat_conf_dir}/hostkey.pem /O=ESGF/OU=ESGF.ORG/CN=${esgf_host:-$(hostname --fqdn)}") --install-ssl-keypair - takes as input private key and public cert files and installs them. [ [ [ ]]] --fetch-esgf-certs - fetches and installs all current public esgf certificates (used by app server and by globus) --rebuild-truststore - converts globus' (public) ca certificates into a truststore. --add-my-cert-to-truststore - adds public cert from keystore to truststore: (secondary flags for this option) --keystore | -ks --keystore-pass | -kpass --alias | -a --truststore | -ts --truststore-pass | tpass --clear-certs - removes the user-level public certificates' directory (used by myproxy) --check-certs - checks both SSL private certificate and Globus private cert for expiration --set-auto-fetch-certs [true|false] - sets if federation certificates are loaded automatically upon startup $(echo_fail --install-signed-cert)* - installs signed host certificate into keystore and truststore $(echo_fail --export-keys-to-globus)* - exports keystore certificates to globus host{cert,key}.pem files. --dname - specify the certificate distinguished name to be used when creating certificates (the DN value is of the form: \"CN=node.lab.gov, OU=simpleCA-pcmdi3.llnl.gov, OU=GlobusTest, O=Grid\") --keystore-password - sets the password to use for the java keystore - default is the node password. 
--keystore-alias - the alias that is associated with the public cert in the keystore --keystore-file - specify what the keystore file should be - default $CATALINA_HOME/tomcat/conf/keystore-tomcat (misc. globus related) --simpleCA-relink - relink orphaned simpleCA configuration symbolic file links. ------------------------------- Database Management ------------------------------- --config-db - provides database only configuration to support based on note type (see --get-type) --backup-db - creates a backup of node database to file the prefix for all schemas provided is \"esgf_\" Ex: -s foo will reference schema esgf_foo [-s|--schema] [-db|--database] --restore-db - restores from (nth) backup of database [-n] (default is n=1 or the last / most recent backup, 1 <= n <= 7) [-s|--schema] [-db|--database] ------------------------------- Trouble Shooting ------------------------------- --verify-thredds-credentials - if there is a thredds/publisher \"re-init\" issue this will fix it ------------------------------- Other ------------------------------- --shell - invokes the esgf shell (esgf-sh) to provide a command line REPL inteface to the node ------------------------------- Node Life Cycle Flags: ------------------------------- start - start the node's services stop - stops the node's services status - status on node's services restart - restarts the node's services (calls stop then start :-/) update - update's the node's software stack to prescribed versions (notice, no \"--\" flag prefix to make rc friendly also chkconfig-able ;-) ------------------------------- To add this script to the linux boot sequence: (as root) > cd /etc/init.d > cp ${install_prefix}/bin/esg-node . > chkconfig --add esg-node > chkconfig --list esg-node ------------------------------- \"stop\" | \"start\" | \"status\" are meant to be run independent of other flags (and put LAST if other flags are used) \"--install\" may be used with \"--verify\" but neither are not intended for use with stop or start or status Ex: ${progname} --install OR ${progname} --verify OR ${progname} --install --verify OR ${progname} --write-env OR ${progname} --version OR ${progname} --clear OR ${progname} --test-pub OR ${progname} --info OR ${progname} --register [hostname of node] ([truststore passwd]) ${progname} --gridftp-config [ bdm &| end-user ] ${progname} start OR ${progname} --gridftp-config [ bdm &| end-user ] start OR ${progname} stop OR ${progname} status OR NOTE: *You must be root or effectively root to run this program, *prefixing the command with sudo will not allow the use of *needed environment variables!!!! If you must use sudo, do so *only to become root proper then source your user's .[bash]rc *file so that root has it's envronment set accordingly! Or you *can more simply become root using sudo's \"-s\" flag. After a *full install there will be a file created ($envfile) that has *the basic environment vars that were used and set during the *installation - this should be sourced by users of this *application stack. -------------- Key Environment Vars: -------------- The following variables are written to the $(echo_ok /etc/esg.env) file which sets up the installation environment. The 'OFFICIAL' names (not the [common_names]) should be used in that file. 
(Nomenclature: OFFICIAL_ENV_VARIABLE_NAME [common_name] : either can be set; common name overrules) $(echo_ok ESGF_HOME) [esg_root_dir] The location where configuration files are kept (the node's state) (default - /esg currently:$esg_root_dir) $(echo_ok ESGF_INSTALL_PREFIX) [install_prefix] Top level directory where core node artifacts are installed (default - /usr/local - currently:$install_prefix) $(echo_ok ESGF_INSTALL_WORKDIR) [workdir] Top level directory where core node artifacts are downloaded and built (default - \$ESGF_USER_HOME/workbench/esg - currently:$workdir) $(echo_strong ESGF_USER_HOME) [installer_home] Directory where the installation user's home directory is. Here for historical reasons when this user's home was used to download and build artifacts. It prefixes the ESGF_INSTALL_DIR [workdir] if it is not explicitly set (default - /usr/local/src/esgf - currently:$installer_home) -------------- Typical usage: -------------- Installation : esg-node --install --verify (submit csr and get back returned cert) Credentials : esg-node --install-ssl-keypair Test Publish : esg-node --test-pub Update Certs : esg-node [--force] --rebuild-truststore Life Cycle : esg-node [start|stop|status|restart|update] ___ ___ ___ ___ | __/ __|/ __| __| | _|\__ \ (_ | _| |___|___/\___|_| " exit 0 } done_remark() { echo "" echo "Finished!..." echo "In order to see if this node has been installed properly you may direct your browser to:" # echo "http://${esgf_host}/esgf-node-manager" if [ $((sel & DATA_BIT )) != 0 ] ; then echo "http://${esgf_host}/thredds" echo "http://${esgf_host}/esg-orp" #echo "http://${esgf_host}/esgf-desktop" fi if [ $((sel & INDEX_BIT)) != 0 ]; then echo "http://${esgf_host}/" fi if [ $((sel & COMPUTE_BIT)) != 0 ]; then echo "http://${esgf_host}/las" fi echo echo "Your peer group membership -- : $(echo_strong [${node_peer_group}])" echo "Your specified \"default\" peer : $(echo_strong [$esgf_default_peer])" echo "Your specified \"index\" peer - : $(echo_strong [${esgf_index_peer}]) (url = http://${esgf_index_peer}/)" echo "Your specified \"idp\" peer --- : $(echo_strong [${esgf_idp_peer}]) (name = ${esgf_idp_peer_name})" echo "Your temporary certificates have been placed in $(echo_strong /etc/tempcerts)" echo "You can install them by executing this : $(echo_strong esg-node --install-keypair /etc/tempcerts/hostcert.pem /etc/tempcerts/hostkey.pem)" echo "When promped for the chainfile, specify: $(echo_strong /etc/tempcerts/cacert.pem)"; if [ -d "${thredds_content_dir}/thredds" ]; then echo echo "[Note: Use UNIX group permissions on ${thredds_content_dir}/thredds/esgcet to enable users to be able to publish thredds catalogs from data therein]" echo " %> chgrp -R ${thredds_content_dir}/thredds" fi printf " ------------------------------------------------------- Administrators of this node should subscribe to the esgf-node-admins@lists.llnl.gov by sending email to: $(echo_strong "majordomo@lists.llnl.gov") with the body: $(echo_strong "subscribe esgf-node-admins") ------------------------------------------------------- " #echo "(\"Test Project\" -> pcmdi.${esg_root_id}.${node_short_name}.test.mytest)" echo "" } #--------------- # s'all about the bits... :-) #--------------- declare -r INSTALL_BIT=1 declare -r TEST_BIT=2 declare -r DATA_BIT=4 declare -r INDEX_BIT=8 declare -r IDP_BIT=16 declare -r COMPUTE_BIT=32 declare -r WRITE_ENV_BIT=64 #declare -r PRIVATE_BIT=128 #NOTE: remember to adjust (below) when adding new bits!! 
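# Worked example: node types combine by simple addition of these bit values, so a
# "data"+"idp" node is recorded as 4+16=20, a "data"+"index" node as 4+8=12, and "all"
# as 4+8+16+32=60; set_sel/read_sel (above) persist and recover this sum via ${esg_config_type_file}.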
declare -r MIN_BIT=4 declare -r MAX_BIT=64 declare -r ALL_BIT=$((DATA_BIT+INDEX_BIT+IDP_BIT+COMPUTE_BIT)) #--------------- #"static" color code arrays... used by echo_c() in esg-functions declare -ar _at_code=("00" "01" "04" "06" "07") declare -ar _fg_code=("00" "30" "31" "32" "33" "34" "35" "36" "37") declare -ar _bg_code=("00" "40" "41" "42" "43" "44" "45" "46" "47") declare -r _esgf_none=0; declare -r _esgf_bold=1; declare -r _esgf_underscore=2; declare -r _esgf_blink=3; declare -r _esgf_reverse=4 declare -r _esgf_black=1; declare -r _esgf_red=2; declare -r _esgf_green=3; declare -r _esgf_yellow=4; declare -r _esgf_blue=5; declare -r _esgf_magenta=6; declare -r _esgf_cyan=7; declare -r _esgf_white=8 declare -r color_capable=$(sed -n -e '/[0-9]/p' <<< $(tput colors)) show_type() { [ $((sel & DATA_BIT)) != 0 ] && resolved_to+="data " [ $((sel & INDEX_BIT)) != 0 ] && resolved_to+="index " [ $((sel & IDP_BIT)) != 0 ] && resolved_to+="idp " [ $((sel & COMPUTE_BIT)) != 0 ] && resolved_to+="compute " echo "node type: [ ${resolved_to}] (${sel}) " } upgrade_mode=0 install_mode=0 main() { # Sourcing esg-functions file [ -e ${esg_functions_file} ] && source ${esg_functions_file} && ((VERBOSE)) && printf "sourcing from: ${esg_functions_file} \n" # Initializing debug_print "esg-node initializing..." ! hostname --fqdn >& /dev/null && echo "Please be sure this host has a fully qualified hostname and reponds to \"hostname --fqdn\" command" && exit 1 # Determining if devel or master directory of the ESGF distribution mirror will be use for download of binaries [ -z "$(echo ${script_version} | sed -n 's/-devel/\0/p')" ] && devel=0 || devel=1 res=`echo $CDAT_HOME | grep -c conda` if [ $res == 0 ]; then unset CDAT_HOME fi # Determining ESGF distribution mirror if [[ $@ == *install* || $@ == *update* || $@ == *upgrade* ]]; then get_esgf_dist_mirror "interactive" $devel else get_esgf_dist_mirror "fastest" $devel fi # Setting esg_dist_url with previously gathered information esg_dist_url_root="${esgf_dist_mirror}/dist" esg_dist_url=${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "") # Sourcing esg-init file [ -e ${esg_init_file} ] && source ${esg_init_file} && ((VERBOSE)) && printf "sourcing from: ${esg_init_file} \n" # Downloading esg-installarg file if [ ! -e ${esg_installarg_file} ] || ((force_install)) || [ ${esg_installarg_file} -ot $0 ]; then checked_get ${esg_installarg_file} ${esg_dist_url}/esgf-installer/${esg_installarg_file##*/} $((force_install)) [ ! 
-s "${esg_installarg_file}" ] && rm ${esg_installarg_file} touch ${esg_installarg_file} fi let sel=0 local selection_string while [ -n "$1" ]; do #echo "arg ${i} = $1" local unshift=0 case $1 in --install | install | update | --update | upgrade | --upgrade) if [ "${1##*-}" = "update" ] || [ "${1##*-}" = "upgrade" ] ; then if (( (install_mode+upgrade_mode) == 0)); then upgrade_mode=1 install_mode=0 debug_print "UPDATE SERVICES" self_verify "update" fi else if (( (install_mode+upgrade_mode) == 0)); then upgrade_mode=0 install_mode=1 debug_print "INSTALL SERVICES" fi fi (( (sel & INSTALL_BIT) == 0 )) && ((sel+=INSTALL_BIT)) ;; --fix-perms | fixperms) debug_print "fixing permissions" setup_sensible_confs exit 0; ;; --install-local-certs | installlocalcerts) debug_print "installing local certs" read_sel install_local_certs exit 0; ;; --generate-esgf-csrs | generateesgfcsrs) debug_print "generating esgf csrs" read_sel generate_esgf_csrs exit 0; ;; --generate-esgf-csrs-ext | generateesgfcsrsext) debug_print "generating esgf csrs for other node" read_sel generate_esgf_csrs_ext exit 0; ;; --cert-howto | certhowto) debug_print "cert howto" cert_howto exit 0; ;; --verify | --test) debug_print "VERIFY SERVICES" (( (sel & TEST_BIT) == 0 )) && ((sel+=TEST_BIT)) debug_print "sel = $sel " test_postgress test_cdat test_esgcet test_tomcat test_tds exit 0; ;; --type | -t | --flavor) #TODO: look for next arg if "data" node or if "index" node set the selection bit accordingly #This also means that I can nott use 1 for basic install will have to use 1 for prerequisite #and then another bit for "data" node vs "index" node local tvalue shift until [ $(echo $1 | egrep '^\s*--') ] || [ -z "$1" ] || [ "$1" = "stop" ] || [ "$1" = "start" ] || [ "$1" = "status" ] || [ "$1" = "restart" ] || [ "$1" = "update" ] || [ "$1" = "upgrade" ] || [ "$1" = "install" ]; do tvalue=$(echo "$1" | tr 'A-Z' 'a-z') #turn on the proper bit when string is detected [ "all" = "${tvalue}" ] && sel=$ALL_BIT && selection_string="all " && shift && break [ "data" = "${tvalue}" ] && (( (sel & DATA_BIT) == 0 )) && ((sel+=DATA_BIT)) && selection_string+="${tvalue} " [ "index" = "${tvalue}" ] && (( (sel & INDEX_BIT) == 0 )) && ((sel+=INDEX_BIT)) && selection_string+="${tvalue} " [ "idp" = "${tvalue}" ] && (( (sel & IDP_BIT) == 0 )) && ((sel+=IDP_BIT)) && selection_string+="${tvalue} " [ "compute" = "${tvalue}" ] && (( (sel & COMPUTE_BIT) == 0 )) && ((sel+=COMPUTE_BIT)) && selection_string+="${tvalue} " shift done [ -z "${selection_string}" ] && echo "Unknown Node Type: [${tvalue}], Sorry :-(" && exit 1; echo "node type set to: [ $selection_string] (${sel}) " unshift=1 ;; --set-type) let sel=0 local tvalue shift until [ -z "$1" ]; do tvalue=$(echo "$1" | tr 'A-Z' 'a-z') #turn on the proper bit when string is detected [ "all" = "${tvalue}" ] && sel=$ALL_BIT && selection_string="all " && shift && break [ "data" = "${tvalue}" ] && ((sel+=DATA_BIT)) && selection_string+="${tvalue} " [ "index" = "${tvalue}" ] && ((sel+=INDEX_BIT)) && selection_string+="${tvalue} " [ "idp" = "${tvalue}" ] && ((sel+=IDP_BIT)) && selection_string+="${tvalue} " [ "compute" = "${tvalue}" ] && ((sel+=COMPUTE_BIT)) && selection_string+="${tvalue} " shift done [ -z "${selection_string}" ] && echo "Unknown Node Type: [${tvalue}], Sorry :-(" && exit 1; [ ! 
-d ${esg_config_dir} ] && mkdir -p ${esg_config_dir} echo "node type set to: [ $selection_string] (${sel}) " set_sel ${sel} exit 0 ;; --get-type | --show-type) read_sel show_type exit 0 ;; start | --start) shift (( $# != 0 )) && echo "error: \"start\" must be the last argument in the arg sequence, Sorry :-(" && exit 1 check_prerequisites [ $? != 0 ] && echo && exit 1 debug_print "START SERVICES: ${sel}" init_structure start ${sel} exit 0 ;; stop | --stop | shutdown | --shutdown) shift (( $# != 0 )) && echo "error: \"stop\" must be the last argument in the arg sequence, Sorry :-(" && exit 1 check_prerequisites [ $? != 0 ] && echo && exit 1 debug_print "STOP SERVICES" init_structure stop ${sel} exit 0 ;; restart | --restart) shift (( $# != 0 )) && echo "error: \"restart\" must be the last argument in the arg sequence, Sorry :-(" && exit 1 check_prerequisites [ $? != 0 ] && echo && exit 1 debug_print "RESTARTING SERVICES" init_structure stop ${sel} sleep 2 start ${sel} exit 0 ;; status | --status) shift (( $# != 0 )) && echo "error: \"status\" must be the last argument in the arg sequence, Sorry :-(" && exit 1 check_prerequisites [ $? != 0 ] && echo && exit 1 status exit $? ;; --update-sub-installer) shift self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 init_structure update_script $@ exit 0 ;; --update-apache-conf | updateapacheconf) debug_print "checking for updated apache frontend configuration" update_apache_conf exit 0; ;; --write-env) (( (sel & WRITE_ENV_BIT) == 0 )) && ((sel+=WRITE_ENV_BIT)) echo ;; -v | --version) echo "Version: $script_version" echo "Release: $script_release" echo "Earth Systems Grid Federation (http://esgf.llnl.gov)" echo "ESGF Node Installation Script" echo "" exit 0 ;; --recommended) recommended=1 custom=0 ;; --custom) recommended=0 custom=1 ;; --use-local-files) use_local_files=1 ;; --devel) devel=1 ;; --prod) devel=0 #hence... production ;; --debug) DEBUG=1 ;; --verbose) VERBOSE=1 ;; --clear) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 if [ -e ${envfile} ]; then mv -v ${envfile} ${envfile}.bak echo "Cleared envfile ${envfile}" fi exit 0 ;; --clear-my-certs) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 echo "Clearing out certs..." local cert_dir=${HOME}/.globus/certificates [ -e ${cert_dir} ] && rm -rf ${cert_dir} exit 0 ;; --test-pub) init_structure shift echo "test_publication" && test_publication $@ exit 0 ;; --test-globus) init_structure read_sel shift test_globus $sel $@ exit 0 ;; --info) esgf_node_info exit 0 ;; --check) shift self_verify $1 exit $? ;; --config-db) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 read_sel config_db exit $? ;; --backup-db) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 init_structure shift backup_db $@ exit $? ;; --restore-db) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 init_structure shift restore_nth_db_backup $@ exit $? ;; --verify-thredds-credentials) shift (( $# != 0 )) && echo "error: \"--verify-thredds-credentials\" must be the last argument in the arg sequence, Sorry :-(" && exit 1 self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 init_structure verify_thredds_credentials exit $? ;; --uninstall) self_verify check_prerequisites [ $? != 0 ] && echo && exit 1 uninstall exit $? ;; --get-idp-peer) get_property esgf_idp_peer echo echo " Current IDP Peer: [${esgf_idp_peer}]" echo exit 0 ;; --set-idp-peer|--set-admin-peer) self_verify check_prerequisites init_structure read_sel select_idp_peer exit $? 
            --get-index-peer)
                get_property esgf_index_peer
                echo
                echo " Current Index Peer: [${esgf_index_peer}]"
                echo
                exit 0
                ;;
            --set-index-peer)
                self_verify
                check_prerequisites
                shift
                init_structure
                read_sel
                set_index_peer ${1:-"self"} "p2p"
                exit $?
                ;;
            --set-publication-target)
                self_verify
                check_prerequisites
                shift
                init_structure
                read_sel
                set_index_peer ${1:-"nohost"} "gateway"
                exit $?
                ;;
            --get-default-peer)
                get_property esgf_default_peer
                echo
                echo " Current \"default\" Peer: [${esgf_default_peer}]"
                echo
                exit 0
                ;;
            --set-default-peer)
                read_sel
                shift
                esgf_default_peer="$*"
                write_as_property esgf_default_peer
                check_for_group_intersection_with ${esgf_default_peer}
                echo " Default Peer set to: [${esgf_default_peer}]"
                echo " (restart node to enable default peer value)"
                echo
                exit $?
                ;;
            --get-peer-group|--get-peer-groups)
                get_property node_peer_group
                echo
                echo " Configured to participate in peer group(s): [${node_peer_group}]"
                echo
                exit 0
                ;;
            --set-peer-group|--set-peer-groups)
                shift
                node_peer_group="$*"
                write_as_property node_peer_group
                read_sel
                check_for_group_intersection_with ${node_peer_group}
                echo " Peer Group is set to: [${node_peer_group}]"
                echo " (restart node to enable group value)"
                echo
                exit $?
                ;;
            --federation-sanity-check)
                shift
                read_sel
                check_for_group_intersection_with $*
                exit $?
                ;;
            --spotcheck)
                shift
                local target="$*"
                if grep -q '\-\-' <<<${1} || [ -z "$*" ]; then
                    target="localhost $*"
                fi
                (${scripts_dir}/esgf-spotcheck ${target})
                exit $?
                ;;
            --optimize-index)
                shift
                (source ${scripts_dir}/esgf-optimize-index $*)
                [ $? != 0 ] && echo "The flag --optimize-index is not enabled..." && exit 1
                exit 0
                ;;
            --crawl)
                shift
                [ -e "${scripts_dir}/esgf-crawl" ] && (${scripts_dir}/esgf-crawl $@) || echo "Sorry, this option not supported :-("
                exit $?
                ;;
            --policy-check)
                shift
                (source ${scripts_dir}/esgf-policy-check && check_policies_for_resource $*)
                [ $? != 0 ] && echo "The flag --policy-check is not enabled..." && exit 1
                exit 0
                ;;
            --register)
                self_verify
                #First arg is the server
                #Second arg is the password (not required)
                shift
                init_structure
                register $1 $2
                [ $? == 0 ] && _set_no_auto_fetch_file
                exit $?
                ;;
            --no-auto-fetch-certs)
                _set_no_auto_fetch_file
                node_auto_fetch_certs=false
                ;;
            --set-auto-fetch-certs)
                shift
                [ "$1" = "off" ] || [ "$1" = "false" ] && node_auto_fetch_certs=false || node_auto_fetch_certs=true
                write_as_property node_auto_fetch_certs
                exit $?
                ;;
            --fetch-esgf-certs)
                self_verify
                read_sel
                shift
                (( $# != 0 )) && echo "error: \"--fetch-esgf-certs\" must be the last argument in the arg sequence, Sorry :-(" && exit 1
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                #get_esgf_dist_mirror "interactive"
                #esg_dist_url_root=${esgf_dist_mirror}
                esg_dist_url=${esg_dist_url_root}$( ((devel == 1)) && echo "/devel" || echo "")
                fetch_esgf_certificates
                exit $?
                ;;
            --rebuild-truststore)
                shift
                (( $# != 0 )) && echo "error: \"--rebuild-truststore\" must be the last argument in the arg sequence, Sorry :-(" && exit 1
                self_verify
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                rebuild_truststore
                exit $?
                ;;
            --add-my-cert-to-truststore)
                self_verify
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                add_my_cert_to_truststore $@
                exit $?
                ;;
            --generate-ssl-key-and-csr)
                self_verify
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                #arg 1 -> what we want to name the public cert request (csr)
                #arg 2 -> what we want to name the private key
                #arg 3 -> what we want the DN to be for the public cert
                #arg 4 -> (keystore password)
                #(no args necessary, all args have defaults)
                #Ex: esg-node --generate-ssl-key-and-csr /usr/local/tomcat/conf/-esg-node.csr /usr/local/tomcat/conf/hostkey.pem /O=ESGF/OU=ESGF.ORG/CN=
                generate_ssl_key_and_csr $@
                exit $?
                ;;
            --migrate-tomcat-credentials-to-esgf)
                shift
                migrate_tomcat_credentials_to_esgf
                sanity_check_web_xmls
                exit $?
                ;;
            --simpleCA-relink | --simpleca-relink)
                self_verify
                shift
                (source ${scripts_dir}/esg-globus && simpleCA_relink $@)
                exit $?
                ;;
            --myproxy-sanity-check)
                self_verify
                shift
                (source ${scripts_dir}/esg-globus && sanity_check_myproxy_configurations $@)
                exit $?
                ;;
            --check-certs)
                self_verify
                shift
                check_certificates $@
                exit $?
                ;;
            --install-ssl-keypair | --install-keypair)
                self_verify
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                install_keypair $@
                exit $?
                ;;
            --update-temp-ca)
                debug_print "updating temporary CA"
                read_sel
                setup_temp_ca
                install_local_certs "firstrun"
                exit 0
                ;;
            --install-globus-online)
                self_verify
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                (cd ${scripts_dir}
                 local fetch_file=esg-globus
                 verbose_print "checked_get ./${fetch_file} ${esg_dist_url}/${server_dir}/${fetch_file}"
                 checked_get ./${fetch_file} ${esg_dist_url}/externals/bootstrap/${fetch_file}
                 (( $? > 1 )) && popd && return 1
                 chmod 755 ${fetch_file})
                (source ${scripts_dir}/esg-globus && setup_globus_online $@)
                exit $?
                ;;
            --set-dname | --set-dn)
                #In Java Style (Ex: OU=ESGF.ORG, O=ESGF scheme using "standard2java_dn" function | micro -> macro, [, ] separated values)
                shift
                dname=$1
                ;;
            --keystore-password)
                shift
                keystore_password=$1
                ;;
            --keystore-alias)
                shift
                keystore_alias=$1
                ;;
            --keystore-file)
                shift
                keystore_file=$1
                ;;
            #*************
            #(deprecated)
            #*************
            --install-signed-cert)
                self_verify
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                install_signed_certificate $@
                exit $?
                ;;
            #*************
            #(deprecated)
            #*************
            --export-keys-to-globus)
                shift
                id | grep root >& /dev/null
                [ $? != 0 ] && printf "$([FAIL]) \n\tMust be root for this option\n\n" && exit 1
                export_keystore_as_globus_hostkeys
                exit $?
                ;;
            --prefix)
                printf "
    $(echo_fail DEPRECATED): The $(echo_fail $1) flag has been deprecated.
    Instead, please use the environment variable $(echo_strong "ESGF_INSTALL_PREFIX")
    Ex:  bash> $(echo_strong ESGF_INSTALL_PREFIX)=/usr/local ${0} ...
"
                exit $?
                ;;
            --workdir)
                printf "
    $(echo_fail DEPRECATED): The $(echo_fail $1) flag has been deprecated.
    Instead, please use the environment variable $(echo_strong "ESGF_INSTALL_WORKDIR")
    Ex:  bash> $(echo_strong ESGF_INSTALL_WORKDIR)=${installer_home}/workbench/esg ${0} ...
"
                exit $?
                ;;
            --no-globus)
                no_globus=1
                ;;
            --force)
                force_install=1
                ;;
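            #------------------------------------------------------------
            # The --gridftp-config switch below (and --index-config / --myproxy
            # further down) takes a variable number of values: every following
            # token is gathered into tmpargs until the next "--" flag or an
            # action word (start/stop/status/restart/install/update/upgrade) is
            # seen, and unshift=1 keeps the outer loop from consuming that
            # terminating token.
            #------------------------------------------------------------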
            --gridftp-config)
                read_sel
                (( (sel & DATA_BIT) == 0 )) && echo "Sorry, the --gridftp-config flag may only be used for the \"data\" installation type" && exit 1
                #acceptable args: "bdm" | "end-user"
                #Gather up tokens after this switch as long as the
                #subsequent tokens do not start with "--"
                local tmpargs="" #array to store args for this switch.
                local let index=0
                shift
                until [ $(echo $1 | egrep '^\s*--') ] || [ -z "$1" ] || [ "$1" = "stop" ] || [ "$1" = "start" ] || [ "$1" = "status" ] || [ "$1" = "restart" ] || [ "$1" = "update" ] || [ "$1" = "upgrade" ] || [ "$1" = "install" ]; do
                    tmpargs[((index++))]=$1
                    debug_print "added $1 to args list: ${tmpargs[@]}"
                    shift
                done
                unshift=1
                [ "${#tmpargs}" = 0 ] && printf "\n\n must follow --gridftp-config with proper flag! \n\t[\"bdm\" &| \"end-user\"]\n" && usage
                gridftp_config=${tmpargs[@]}
                ((DEBUG)) && echo "parsed from commandline - gridftp_config is [${gridftp_config}]"
                unset tmpargs
                ;;
            --index-config)
                read_sel
                (( (sel & INDEX_BIT) == 0 )) && echo "Sorry, the --index-config flag may only be used for the \"index\" installation type" && exit 1
                #acceptable args: "master" &| "slave"
                #Gather up tokens after this switch as long as the
                #subsequent tokens do not start with "--"
                local tmpargs="" #array to store args for this switch.
                local let index=0
                shift
                until [ $(echo $1 | egrep '^\s*--') ] || [ -z "$1" ] || [ "$1" = "stop" ] || [ "$1" = "start" ] || [ "$1" = "status" ] || [ "$1" = "restart" ] || [ "$1" = "update" ] || [ "$1" = "upgrade" ] || [ "$1" = "install" ]; do
                    tmpargs[((index++))]=$1
                    debug_print "added $1 to args list: ${tmpargs[@]}"
                    shift
                done
                unshift=1
                [ "${#tmpargs}" = 0 ] && printf "\n\n must follow --index-config with proper flag! \n\t[\"master\" &| \"slave\"]\n" && usage
                index_config=${tmpargs[@]}
                ((DEBUG)) && echo "parsed from commandline - index_config is [${index_config}]"
                unset tmpargs
                ;;
            --check-shards)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && check_shards $@)
                exit $?
                ;;
            --add-replica-shard)
                #expecting <hostname>:<port> | master | slave
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && add_shard $@)
                exit $?
                ;;
            --remove-replica-shard)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && remove_shard $@)
                exit $?
                ;;
            --list-shards)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && list_local_shards)
                exit $?
                ;;
            --init-shards)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && init_all_shards)
                exit $?
                ;;
            --time-shards)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && time_shards $@)
                exit $?
                ;;
            --update-publisher-resources)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && setup_publisher_resources $@)
                exit $?
                ;;
            --shell)
                shift
                [ -e "${scripts_dir}/esgf-sh" ] && (${scripts_dir}/esgf-sh) || echo "Sorry, this option not supported :-("
                exit $?
                ;;
            --fetch-static-shards-file)
                shift
                [ -e "${scripts_dir}/esg-search" ] && (source ${scripts_dir}/esg-search >& /dev/null && fetch_static_shards_file $@)
                exit $?
                ;;
            --fetch-static-idp-file)
                shift
                [ -e "${scripts_dir}/esg-orp" ] && (source ${scripts_dir}/esg-orp >& /dev/null && fetch_static_idp_file $@)
                exit $?
                ;;
            --fetch-static-ats-file)
                shift
                [ -e "${scripts_dir}/esg-orp" ] && (source ${scripts_dir}/esg-orp >& /dev/null && fetch_static_ats_file $@)
                exit $?
                ;;
            --fetch-known-providers-file)
                shift
                [ -e "${scripts_dir}/esg-orp" ] && (source ${scripts_dir}/esg-orp >& /dev/null && fetch_known_providers_file $@)
                exit $?
                ;;
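            # (The shard-management and static-file options above are silently
            #  skipped unless the corresponding ${scripts_dir}/esg-search or
            #  esg-orp helper script is present; each arm sources the helper in
            #  a subshell and calls a single function from it.)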
            --myproxy)
                (( (sel & IDP_BIT) == 0 )) && echo "Sorry, the --myproxy flag may only be used for \"idp\" type commands" && exit 1
                #acceptable args: [gen-self-cert] | [regen-simpleca] [fetch-certs|gen-self-cert|keep-certs] | [install|update]
                #Gather up tokens after this switch as long as the
                #subsequent tokens do not start with "--"
                local tmpargs="" #array to store args for this switch.
                local let index=0
                shift
                until [ $(echo $1 | egrep '^\s*--') ] || [ -z "$1" ] || [ "$1" = "stop" ] || [ "$1" = "start" ] || [ "$1" = "status" ] || [ "$1" = "restart" ] || [ "$1" = "update" ] || [ "$1" = "upgrade" ] || [ "$1" = "install" ]; do
                    tmpargs[((index++))]=$1
                    debug_print "added $1 to args list: ${tmpargs[@]}"
                    shift
                done
                unshift=1
                [ "${#tmpargs}" = 0 ] && printf "\n\n must follow --myproxy with proper flags! \n\n" && usage
                myproxy_config_args=${tmpargs[@]}
                unset tmpargs
                ;;
            -h | --help)
                usage
                ;;
            *)
                printf "\n ERROR: unknown switch \"$1\" \n\n" && exit 1
                ;;
        esac
        ((!unshift)) && shift
    done

    check_prerequisites
    [ $? != 0 ] && echo && exit 1
    self_verify

    debug_print "SEL = $sel"
    [ $((sel)) == 0 ] && usage

    echo
    echo "-----------------------------------"
    echo "ESGF Node Installation Program"
    echo "-----------------------------------"
    echo

    read_sel

    #If we are doing an install - make sure a type is selected
    if [ $((sel & INSTALL_BIT)) != 0 ] && (( !(sel >= $MIN_BIT && sel <= $MAX_BIT) )); then
        printf "
    Sorry, no suitable node type has been selected.
    Please run the script again with --set-type and provide any number of type values
    (\"data\", \"index\", \"idp\", \"compute\" [or \"all\"]) you wish to install
    (no quotes - they can be specified in any combination, or use \"all\" as a shortcut)

    Ex:  esg-node --set-type data
         esg-node install

    or do so as a single command line:

    Ex:  esg-node --type data install

    Use the --help | -h option for more information

    Note: The type value is recorded upon successfully starting the node.
    The value is used for subsequent launches, so the type does not always have to be specified.
    A simple \"esg-node start\" will launch with the last type that successfully launched,
    which makes it ideal for use in the boot sequence (chkconfig) scenario.
    (more documentation available at https://github.com/ESGF/esgf-installer/wiki)\n\n"
        exit 1
    fi

    esgf_node_info

    local doit="n"
    echo
    (( devel == 1 )) && echo "(Installing DEVELOPMENT tree...)" && echo ""
    read -e -p "Are you ready to begin the installation? [Y/n] " doit
    if [ "${doit}" = "N" ] || [ "${doit}" = "n" ] || [ "${doit}" = "no" ]; then
        exit 0
    fi

    init_structure

    (( force_install )) && echo "(force install is ON)"
    (( (sel & DATA_BIT) != 0 )) && echo "(data node type selected)"
    (( (sel & INDEX_BIT) != 0 )) && echo "(index node type selected)"
    (( (sel & IDP_BIT) != 0 )) && echo "(idp node type selected)"
    (( (sel & COMPUTE_BIT) != 0 )) && echo "(compute node type selected)"

    initial_setup_questionnaire
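    # NOTE: the prerequisite installation below assumes a RedHat/CentOS 6 family
    # system with yum available: it enables the EPEL repository and points the
    # ESGF RPM repository at the 6/x86_64 tree of ${esgf_dist_mirror}.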
    #---------------------------------------
    #Installation of prerequisites.
    #---------------------------------------
    echo
    echo "*******************************"
    echo "Installing prerequisites"
    echo "*******************************"
    echo

    yum -y remove rpmforge-release
    yum -y install epel-release
    [ $? != 0 ] && printf "$([FAIL]) \n\tCould not configure epel repository\n\n" && return 1

    yum -y install yum-plugin-priorities sqlite-devel freetype-devel git curl-devel autoconf automake \
        bison file flex gcc gcc-c++ gettext-devel libtool uuid-devel libuuid-devel libxml2 libxml2-devel \
        libxslt libxslt-devel lsof make openssl-devel pam-devel pax readline-devel tk-devel wget zlib-devel \
        perl-Archive-Tar perl-XML-Parser libX11-devel libtool-ltdl-devel e2fsprogs-devel gcc-gfortran \
        libicu-devel libgtextutils-devel httpd httpd-devel mod_ssl libjpeg-turbo-devel ntp '*ExtUtils*'
    [ $? != 0 ] && printf "$([FAIL]) \n\tCould not install or update prerequisites\n\n" && return 1

    #---------------------------------------
    #Setup ESGF RPM repository
    #---------------------------------------
    echo
    echo "*******************************"
    echo "Setting up ESGF RPM repository"
    echo "*******************************"
    echo

    echo '[esgf]' > /etc/yum.repos.d/esgf.repo
    echo 'name=ESGF' >> /etc/yum.repos.d/esgf.repo
    if [[ ${DISTRIB} == "CentOS" ]] || [[ ${DISTRIB} = "Scientific Linux" ]]; then
        echo "baseurl=${esgf_dist_mirror}/RPM/centos/6/x86_64" >> /etc/yum.repos.d/esgf.repo
    elif [[ ${DISTRIB} == "Red Hat"* ]]; then
        echo "baseurl=${esgf_dist_mirror}/RPM/redhat/6/x86_64" >> /etc/yum.repos.d/esgf.repo
    fi
    echo 'failovermethod=priority' >> /etc/yum.repos.d/esgf.repo
    echo 'enabled=1' >> /etc/yum.repos.d/esgf.repo
    echo 'priority=90' >> /etc/yum.repos.d/esgf.repo
    echo 'gpgcheck=0' >> /etc/yum.repos.d/esgf.repo
    echo 'proxy=_none_' >> /etc/yum.repos.d/esgf.repo
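    #------------------------------------------------------------
    # From here on, each installation step is a guarded one-liner: the leading
    # [ $((sel & <BIT>)) != 0 ] tests run a given setup_*/test_* call only when
    # the matching install/test and node-type bits are set in ${sel}, so a
    # single pass through this list covers every node-type combination.
    #------------------------------------------------------------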
    #---------------------------------------
    #Installation of basic system components.
    # (Only when one setup in the sequence is okay can we move to the next)
    #---------------------------------------
    # [ $((sel & INSTALL_BIT)) != 0 ] && setup_openssl
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_java
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_ant
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_postgress
    [ $((sel & TEST_BIT)) != 0 ] && test_postgress
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_cdat
    [ $((sel & TEST_BIT)) != 0 ] && test_cdat
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT+COMPUTE_BIT)) != 0 ] && setup_esgcet ${upgrade_mode}
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & DATA_BIT+COMPUTE_BIT)) != 0 ] && test_esgcet
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_tomcat ${upgrade_mode}
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_apache_frontend
    [ $((sel & TEST_BIT)) != 0 ] && test_tomcat
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT+COMPUTE_BIT)) != 0 ] && setup_tds ${upgrade_mode}
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & DATA_BIT+COMPUTE_BIT)) != 0 ] && test_tds
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_subsystem node-manager esgf-node-manager #(tomcat off)
    [ $((sel & INSTALL_BIT)) != 0 ] && setup_subsystem dashboard esgf-dashboard #(tomcat off)
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && setup_subsystem desktop esgf-desktop #(tomcat off)

    # Compute Languages and Tools Setup
    #[ $((sel & INSTALL_BIT)) != 0 ] && setup_compute_languages
    #[ $((sel & INSTALL_BIT)) != 0 ] && setup_compute_tools

    #---------------------------------------
    #Installation of "plugin" subsystems... & filters
    #---------------------------------------
    #---subsystems-----
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "orp security subsystem" && setup_subsystem orp esg-orp $@
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "drslib subsystem" && setup_subsystem drslib esgf-drslib $@

    #---filters (TDS)--
    # Getting the TDS web.xml template file. Forcing the download here lets us update the TDS filters config
    # without going through tds_setup(). This is to be enabled when TDS web.xml changes need to be pulled in
    # without reinstalling the whole TDS webapp.
    #[ $((sel & DATA_BIT)) != 0 ] && get_webxml_file
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "security filters subsystem (SAML/ORP)" && service_name="thredds" setup_subsystem security-tokenless-filters filters $@
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "security filters subsystem (TOKEN)" && service_name="thredds" setup_subsystem security-token-filters filters $@
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "security las filter subsystem (IP)" && service_name="thredds" setup_subsystem security-las-ip-filter filters $@
    #[ $((sel & INSTALL_BIT)) != 0 ] && echo "security filters subsystem (SAML/ORP)" && service_name="esgf-dashboard" setup_subsystem security-tokenless-filters filters $@
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "access logging filter subsystem (DATA)" && service_name="thredds" extensions=".nc" setup_subsystem access-logging-filter filters $@
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "DRS/FS resolving filter subsystem" && setup_subsystem drs-resolving-filter filters $@

    #---------------------------------------
    # Security Services Installation...
    #---------------------------------------
    [ $((sel & IDP_BIT+DATA_BIT)) != 0 ] && setup_subsystem security esgf-security $@
    [ $((sel & IDP_BIT)) != 0 ] && setup_subsystem idp esgf-idp $@

    #---------------------------------------
    # Globus Installation...
    #---------------------------------------
    #(myproxy - [depends on security backend in place])
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & IDP_BIT)) != 0 ] && (( ! no_globus )) && debug_print "calling... setup_globus [${IDP_BIT}]" && my_setup_globus $((INSTALL_BIT+IDP_BIT)) ${myproxy_config_args}
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & IDP_BIT)) != 0 ] && (( ! no_globus )) && debug_print "calling... test_globus [${IDP_BIT}]" && test_globus $((TEST_BIT+IDP_BIT)) ${myproxy_config_args}
    #(gridftp)
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && (( ! no_globus )) && debug_print "calling... my_setup_globus [${DATA_BIT}]" && my_setup_globus ${DATA_BIT} ${gridftp_config}
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && (( ! no_globus )) && debug_print "calling... test_globus [${DATA_BIT}]" && test_globus ${DATA_BIT} ${gridftp_config}
    [ $((sel & INSTALL_BIT)) != 0 ] && (( ! no_globus )) && debug_print "installing local certs" && install_local_certs "firstrun"

    #OAuth SLCS setup
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & IDP_BIT)) != 0 ] && (( ! no_globus )) && setup_slcs

    #---------------------------------------
    # Index type install...
    #---------------------------------------
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & INDEX_BIT)) != 0 ] && echo "search subsystem" && setup_subsystem search esg-search ${index_config}
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & INDEX_BIT)) != 0 ] && echo "web-fe subsystem" && setup_subsystem web-fe esgf-web-fe $@
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & INDEX_BIT)) != 0 ] && echo "cog subsystem" && setup_subsystem_new cog esgf-cog $@
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & INDEX_BIT)) != 0 ] && echo "test search subsystem" && \
        (source ${scripts_dir}/esg-search >& /dev/null && test_search_services)
    #---------------------------------------
    # Compute type install...
    #---------------------------------------
    #Do a quick check to see if the compute bit should be turned on.... If so, turn on that switch and keep going...
    if [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & COMPUTE_BIT)) == 0 ] && sanity_check_for_hints_todo_compute; then
        ((sel+=COMPUTE_BIT))
    fi
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & COMPUTE_BIT)) != 0 ] && setup_subsystem_new product-server esgf-product-server $@
    #[ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & COMPUTE_BIT)) != 0 ] && setup_subsystem compute-tools esgf-compute-tools $@
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & COMPUTE_BIT)) != 0 ] && echo "access logging filter subsystem (COMPUTE)" && service_name="las" extensions="ProductServer.do" setup_subsystem access-logging-filter filters $@
    [ $((sel & INSTALL_BIT)) != 0 ] && [ $((sel & COMPUTE_BIT)) != 0 ] && echo "security filters subsystem (SAML/ORP)" && service_name="las" setup_subsystem security-tokenless-filters filters $@

    #---------------------------------------
    # Publishing Test...
    #---------------------------------------
    [ $((sel & INSTALL_BIT)) != 0 ] && [ -n "${esgf_index_peer}" ] && set_index_peer "${esgf_index_peer}"
    [ $((sel & TEST_BIT)) != 0 ] && [ $((sel & DATA_BIT)) != 0 ] && echo "test_publication" && test_publication

    #---------------------------------------
    #Summary and Installation Housekeeping...
    #---------------------------------------
    [ $((sel & INSTALL_BIT)) != 0 ] && echo "show_summary" && show_summary
    [ $((sel & WRITE_ENV_BIT)) != 0 ] && echo "write_env" && write_env && exit 0

    #---
    #For the sake of the node manager registry, let's just make sure that the derivative whitelist files
    #exist and have the appropriate ownership and permissions
    #This function is defined in the esg-node-manager script, which at this point should have been already sourced!
    (source ${scripts_dir}/esg-node-manager >& /dev/null && touch_generated_whitelist_files)
    #---

    #---------------------------------------
    #System Launch...
    #---------------------------------------
    sanity_check_web_xmls
    setup_root_app

    [ -e "${tomcat_install_dir}/work/Catalina/localhost" ] && rm -rf ${tomcat_install_dir}/work/Catalina/localhost/* && echo "Cleared tomcat cache... "

    # Hard coded to remove the node manager, desktop and dashboard webapps
    rm -rf /usr/local/tomcat/webapps/esgf-node-manager
    rm -rf /usr/local/tomcat/webapps/esgf-desktop
    rm -rf /usr/local/tomcat/webapps/esgf-dashboard
    #fix for sensible values for conf files post node-manager removal
    setup_sensible_confs

    start ${sel}

    install_bash_completion_file
    done_remark

    echo "${script_version}" > ${esg_root_dir}/version
    echo "${script_version}"
    echo
    write_as_property version ${script_version}
    write_as_property release ${script_release}
    write_as_property gridftp_config

    echo 'source /usr/local/conda/bin/activate esgf-pub' >> ${envfile}

    esg_node_finally
}

esg_node_finally() {
    debug_print "(esg_datanode: cleaning up etc...)"
    chown -R ${installer_uid}:${installer_gid} ${X509_CERT_DIR} >& /dev/null
    if [ $((sel & IDP_BIT)) != 0 ]; then
        export PGPASSWORD=${pg_sys_acct_passwd}
        echo "Writing additional settings to db. If these settings already exist, psql will report an error, but it is ok to disregard."
        psql -U dbsuper -c "insert into esgf_security.permission values (1, 1, 1, 't'); insert into esgf_security.role values (6, 'user', 'User Data Access');" esgcet
        echo "Node installation is complete."
    fi
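    # If an output-capture named pipe exists (presumably created by the
    # setup-autoinstall wrapper mentioned in the message below), let it know
    # that the installer has finished.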
    if [ -p /tmp/outputpipe ]; then
        echo "Installer ran to completion. Now cleaning up. There will be a 'Killed' message in your setup-autoinstall terminal, which is not a cause for concern." > /tmp/outputpipe
    fi
    #exec 1>&3 3>&- 2>&4 4>&-
    #wait $tpid
    #rm $OUTPUT_PIPE
    exit 0
}

#Set system traps
trap esg_node_finally INT TERM

##Setup output streams for install output capture
#OUTPUT_PIPE=/tmp/${_t##*/}.pipe
#[ ! -e $OUTPUT_PIPE ] && mkfifo $OUTPUT_PIPE
#[ -e $logfile ] && rm $logfile
#
#exec 3>&1 4>&2
#tee $logfile < $OUTPUT_PIPE >&3 &
#tpid=$!
#exec > $OUTPUT_PIPE 2>&1

### initialization done!

main $@

#clear system traps
trap - INT TERM