diff --git a/README.md b/README.md index 413331f..8437adc 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,9 @@ Get [ACE for Developers edition](https://www.ibm.com/marketing/iwm/iwm/web/pick. ### Build an image with App Connect Enterprise only +NOTE: The current dockerfiles are tailored towards use by the App Connect Operator and as a result may have function removed from it if we are no longer using it in our operator. If you prefer to use the old dockerfiles for building your containers please use the `Dockerfile-legacy.aceonly` file + + The `deps` folder must contain a copy of ACE, **version 12.0.1.0 or greater**. If using ACE for Developers, download it from [here](https://www.ibm.com/marketing/iwm/iwm/web/pick.do?source=swg-wmbfd). Then set the build argument `ACE_INSTALL` to the name of the ACE file placed in `deps`. diff --git a/ace_config_webusers.sh b/ace_config_webusers.sh deleted file mode 100755 index 797ac6e..0000000 --- a/ace_config_webusers.sh +++ /dev/null @@ -1,211 +0,0 @@ -#!/bin/bash - -# © Copyright IBM Corporation 2018. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Eclipse Public License v2.0 -# which accompanies this distribution, and is available at -# http://www.eclipse.org/legal/epl-v20.html - -if [ -z "$MQSI_VERSION" ]; then - source /opt/ibm/ace-12/server/bin/mqsiprofile -fi - -SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -source ${SCRIPT_DIR}/ace_config_logging.sh - -log "Handling webusers configuration" - -ADMINUSERSFILE=/home/aceuser/initial-config/webusers/admin-users.txt -OPERATORUSERSFILE=/home/aceuser/initial-config/webusers/operator-users.txt -EDITORUSERSFILE=/home/aceuser/initial-config/webusers/editor-users.txt -AUDITUSERSFILE=/home/aceuser/initial-config/webusers/audit-users.txt -VIEWERUSERSFILE=/home/aceuser/initial-config/webusers/viewer-users.txt - -if [ -s $ADMINUSERSFILE ] || [ -s $OPERATORUSERSFILE ] || [ -s $EDITORUSERSFILE ] || [ -s $AUDITUSERSFILE ] || [ -s $VIEWERUSERSFILE ]; then - OUTPUT=$(mqsichangeauthmode -w /home/aceuser/ace-server -s active -m file 2>&1) - logAndExitIfError $? "${OUTPUT}" -fi - -if [ -f $ADMINUSERSFILE ]; then - if [ -s $ADMINUSERSFILE ]; then - if [ -r $ADMINUSERSFILE ]; then - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r admin -p all+ 2>&1) - logAndExitIfError $? "${OUTPUT}" - - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r admin -o Data -p all+ 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "ERROR: $ADMINUSERSFILE is not readable" - exit 66 - fi - else - log "ERROR: $ADMINUSERSFILE is empty" - exit 67 - fi -fi - -if [ -f $OPERATORUSERSFILE ]; then - if [ -s $OPERATORUSERSFILE ]; then - if [ -r $OPERATORUSERSFILE ]; then - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r operator -p read+,write-,execute+ 2>&1) - logAndExitIfError $? 
"${OUTPUT}" - else - log "ERROR: $OPERATORUSERSFILE is not readable" - exit 66 - fi - else - log "ERROR: $OPERATORUSERSFILE is empty" - exit 67 - fi -fi - -if [ -f $EDITORUSERSFILE ]; then - if [ -s $EDITORUSERSFILE ]; then - if [ -r $EDITORUSERSFILE ]; then - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r editor -p read+,write+,execute- 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "ERROR: $EDITORUSERSFILE is not readable" - exit 66 - fi - else - log "ERROR: $EDITORUSERSFILE is empty" - exit 67 - fi -fi - -if [ -f $AUDITUSERSFILE ]; then - if [ -s $AUDITUSERSFILE ]; then - if [ -r $AUDITUSERSFILE ]; then - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r audit -p read+,write-,execute- 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "ERROR: $AUDITUSERSFILE is not readable" - exit 66 - fi - else - log "ERROR: $AUDITUSERSFILE is empty" - exit 67 - fi -fi - -if [ -f $VIEWERUSERSFILE ]; then - if [ -s $VIEWERUSERSFILE ]; then - if [ -r $VIEWERUSERSFILE ]; then - OUTPUT=$(mqsichangefileauth -w /home/aceuser/ace-server -r viewer -p read+,write-,execute- 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "ERROR: $VIEWERUSERSFILE is not readable" - exit 66 - fi - else - log "ERROR: $VIEWERUSERSFILE is empty" - exit 67 - fi -fi - -if [ -s $ADMINUSERSFILE ] || [ -s $OPERATORUSERSFILE ] || [ -s $EDITORUSERSFILE ] || [ -s $AUDITUSERSFILE ] || [ -s $VIEWERUSERSFILE ]; then - - OLDIFS=${IFS} - - if [ -s $ADMINUSERSFILE ]; then - IFS=$'\n' - for line in $(cat $ADMINUSERSFILE | tr -d '\r'); do - if [[ $line =~ ^\# ]]; then - continue - fi - IFS=${OLDIFS} - fields=($line) - if [ $(mqsiwebuseradmin -w /home/aceuser/ace-server -l | grep admin | wc -l) -eq 0 ] - then - log "Creating admin user ${fields[0]}" - - OUTPUT=$(mqsiwebuseradmin -w /home/aceuser/ace-server -c -u ${fields[0]} -a ${fields[1]} -r admin 2>&1) - logAndExitIfError $? 
"${OUTPUT}" - else - log "admin user already exists" - fi - done - fi - - if [ -s $OPERATORUSERSFILE ]; then - IFS=$'\n' - for line in $(cat $OPERATORUSERSFILE | tr -d '\r'); do - if [[ $line =~ ^\# ]]; then - continue - fi - IFS=${OLDIFS} - fields=($line) - if [ $(mqsiwebuseradmin -w /home/aceuser/ace-server -l | grep operator | wc -l) -eq 0 ] - then - log "Creating operator user ${fields[0]}" - - OUTPUT=$(mqsiwebuseradmin -w /home/aceuser/ace-server -c -u ${fields[0]} -a ${fields[1]} -r operator 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "operator user already exists" - fi - done - fi - - if [ -s $EDITORUSERSFILE ]; then - IFS=$'\n' - for line in $(cat $EDITORUSERSFILE | tr -d '\r'); do - if [[ $line =~ ^\# ]]; then - continue - fi - IFS=${OLDIFS} - fields=($line) - if [ $(mqsiwebuseradmin -w /home/aceuser/ace-server -l | grep editor | wc -l) -eq 0 ] - then - log "Creating editor user ${fields[0]}" - - OUTPUT=$(mqsiwebuseradmin -w /home/aceuser/ace-server -c -u ${fields[0]} -a ${fields[1]} -r editor 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "editor user already exists" - fi - done - fi - - if [ -s $AUDITUSERSFILE ]; then - IFS=$'\n' - for line in $(cat $AUDITUSERSFILE | tr -d '\r'); do - if [[ $line =~ ^\# ]]; then - continue - fi - IFS=${OLDIFS} - fields=($line) - if [ $(mqsiwebuseradmin -w /home/aceuser/ace-server -l | grep audit | wc -l) -eq 0 ] - then - log "Creating audit user ${fields[0]}" - - OUTPUT=$(mqsiwebuseradmin -w /home/aceuser/ace-server -c -u ${fields[0]} -a ${fields[1]} -r audit 2>&1) - logAndExitIfError $? 
"${OUTPUT}" - else - log "audit user already exists" - fi - done - fi - - if [ -s $VIEWERUSERSFILE ]; then - IFS=$'\n' - for line in $(cat $VIEWERUSERSFILE | tr -d '\r'); do - if [[ $line =~ ^\# ]]; then - continue - fi - IFS=${OLDIFS} - fields=($line) - if [ $(mqsiwebuseradmin -w /home/aceuser/ace-server -l | grep viewer | wc -l) -eq 0 ] - then - log "Creating viewer user ${fields[0]}" - - OUTPUT=$(mqsiwebuseradmin -w /home/aceuser/ace-server -c -u ${fields[0]} -a ${fields[1]} -r viewer 2>&1) - logAndExitIfError $? "${OUTPUT}" - else - log "viewer user already exists" - fi - done - fi -fi diff --git a/ace_discover_port_overrides.sh b/ace_discover_port_overrides.sh deleted file mode 100755 index 18e2551..0000000 --- a/ace_discover_port_overrides.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -ex -if ! [[ -z "${SERVICE_NAME}" ]] && [[ -z "${MQSI_OVERRIDE_HTTP_PORT}" ]] && [[ -z "${MQSI_OVERRIDE_HTTPS_PORT}" ]] ; then - echo "export MQSI_OVERRIDE_HTTP_PORT=$(kubectl get svc ${SERVICE_NAME} -o jsonpath=\"{.spec.ports[1].nodePort}\")" >> /home/aceuser/portOverrides - echo "export MQSI_OVERRIDE_HTTPS_PORT=$(kubectl get svc ${SERVICE_NAME} -o jsonpath=\"{.spec.ports[2].nodePort}\")" >> /home/aceuser/portOverrides -fi diff --git a/ace_integration_server.sh b/ace_integration_server.sh index 85900c6..0cb1dfe 100755 --- a/ace_integration_server.sh +++ b/ace_integration_server.sh @@ -11,6 +11,11 @@ if [ -z "$MQSI_VERSION" ]; then source /opt/ibm/ace-12/server/bin/mqsiprofile fi +# Enable TLS on both MQ and DB2 +if [ -d /opt/mqm/gskit8/lib64 ]; then + export LD_LIBRARY_PATH=/opt/mqm/gskit8/lib64:$LD_LIBRARY_PATH +fi + if [ -s /home/aceuser/ace-server/odbc.ini ]; then export ODBCINI=/home/aceuser/ace-server/odbc.ini fi diff --git a/cmd/chkacehealthy/main.go b/cmd/chkacehealthy/main.go index a8e45a3..b56cf8e 100644 --- a/cmd/chkacehealthy/main.go +++ b/cmd/chkacehealthy/main.go @@ -36,11 +36,14 @@ func main() { if err != nil { - fmt.Println("REST endpoint failed" + err.Error()) + 
fmt.Println("Unable to connect to IntegrationServer REST endpoint: " + err.Error() + ", ") fileInfo, statErr := os.Stat("/tmp/integration_server_restart.timestamp") - if statErr != nil { + if os.IsNotExist(statErr) { + fmt.Println("Integration server is not active") + os.Exit(1) + } else if statErr != nil { fmt.Println(statErr) os.Exit(1) } else { diff --git a/cmd/runaceserver/integrationserver.go b/cmd/runaceserver/integrationserver.go index b91b830..9440e64 100644 --- a/cmd/runaceserver/integrationserver.go +++ b/cmd/runaceserver/integrationserver.go @@ -42,6 +42,7 @@ import ( "github.com/ot4i/ace-docker/internal/configuration" "github.com/ot4i/ace-docker/internal/name" "github.com/ot4i/ace-docker/internal/qmgr" + "github.com/ot4i/ace-docker/internal/webadmin" "gopkg.in/yaml.v2" "software.sslmate.com/src/go-pkcs12" @@ -49,10 +50,21 @@ import ( var osMkdir = os.Mkdir var osCreate = os.Create +var osStat = os.Stat var ioutilReadFile = ioutil.ReadFile +var ioutilReadDir = ioutil.ReadDir var ioCopy = io.Copy var contentserverGetBAR = contentserver.GetBAR var watcher *fsnotify.Watcher +var createSHAServerConfYaml = createSHAServerConfYamlLocal +var homedir string = "/home/aceuser" +var initialConfigDir string = "/home/aceuser/initial-config" +var ConfigureWebAdminUsers = webadmin.ConfigureWebAdminUsers +var readServerConfFile = readServerConfFileLocal +var yamlUnmarshal = yaml.Unmarshal +var yamlMarshal = yaml.Marshal +var writeServerConfFile = writeServerConfFileLocal +var getConfigurationFromContentServer = getConfigurationFromContentServerLocal // createSystemQueues creates the default MQ service queues used by the Integration Server func createSystemQueues() error { @@ -86,7 +98,7 @@ func initialIntegrationServerConfig() error { } } - fileList, err := ioutil.ReadDir("/home/aceuser") + fileList, err := ioutilReadDir(homedir) if err != nil { log.Errorf("Error checking for an initial configuration folder: %v", err) return err @@ -104,7 +116,7 @@ func 
initialIntegrationServerConfig() error { return nil } - fileList, err = ioutil.ReadDir("/home/aceuser/initial-config") + fileList, err = ioutil.ReadDir(initialConfigDir) if err != nil { log.Errorf("Error checking for initial configuration folders: %v", err) return err @@ -112,7 +124,6 @@ func initialIntegrationServerConfig() error { // Sort filelist to server.conf.yaml gets written before webusers are processedconfigDirExists SortFileNameAscend(fileList) - for _, file := range fileList { if file.IsDir() && file.Name() != "mqsc" && file.Name() != "workdir_overrides" { log.Printf("Processing configuration in folder %v", file.Name()) @@ -126,20 +137,29 @@ func initialIntegrationServerConfig() error { log.LogDirect(out) } else { if file.Name() == "webusers" { + log.Println("Configuring server.conf.yaml overrides - Webadmin") updateServerConf := createSHAServerConfYaml() if updateServerConf != nil { log.Errorf("Error setting webadmin SHA server.conf.yaml: %v", updateServerConf) return updateServerConf } + log.Println("Configuring WebAdmin Users") + err := ConfigureWebAdminUsers(log) + if err != nil { + log.Errorf("Error configuring the WebAdmin users : %v", err) + return err + } } - cmd := exec.Command("ace_config_" + file.Name() + ".sh") - out, _, err := command.RunCmd(cmd) - if err != nil { + if file.Name() != "webusers" { + cmd := exec.Command("ace_config_" + file.Name() + ".sh") + out, _, err := command.RunCmd(cmd) + if err != nil { + log.LogDirect(out) + log.Errorf("Error processing configuration in folder %v: %v", file.Name(), err) + return err + } log.LogDirect(out) - log.Errorf("Error processing configuration in folder %v: %v", file.Name(), err) - return err } - log.LogDirect(out) } } } @@ -208,15 +228,6 @@ func initialIntegrationServerConfig() error { log.Println("Initial configuration of integration server complete") - log.Println("Discovering override ports") - - out, _, err := command.Run("bash", "ace_discover_port_overrides.sh") - if err != nil { - 
log.Errorf("Error discovering override ports: %v", string(out)) - return err - } - log.Println("Successfully discovered override ports") - return nil } @@ -226,7 +237,7 @@ func SortFileNameAscend(files []os.FileInfo) { }) } -func createSHAServerConfYaml() error { +func createSHAServerConfYamlLocal() error { oldserverconfContent, readError := readServerConfFile() if readError != nil { @@ -238,7 +249,7 @@ func createSHAServerConfYaml() error { } serverconfMap := make(map[interface{}]interface{}) - unmarshallError := yaml.Unmarshal([]byte(oldserverconfContent), &serverconfMap) + unmarshallError := yamlUnmarshal([]byte(oldserverconfContent), &serverconfMap) if unmarshallError != nil { log.Errorf("Error unmarshalling server.conf.yaml: %v", unmarshallError) return unmarshallError @@ -246,19 +257,19 @@ func createSHAServerConfYaml() error { if serverconfMap["RestAdminListener"] == nil { serverconfMap["RestAdminListener"] = map[string]interface{}{ - "webUserPasswordHashAlgorithm": "SHA-1", + "authorizationEnabled": true, + "authorizationMode": "file", + "basicAuth": true, } - log.Printf("Updating RestAdminListener/webUserPasswordHashAlgorithm") } else { restAdminListener := serverconfMap["RestAdminListener"].(map[interface{}]interface{}) - if restAdminListener["webUserPasswordHashAlgorithm"] == nil { - restAdminListener["webUserPasswordHashAlgorithm"] = "SHA-1" - log.Printf("Updating RestAdminListener/webUserPasswordHashAlgorithm") - } + restAdminListener["authorizationEnabled"] = true + restAdminListener["authorizationMode"] = "file" + restAdminListener["basicAuth"] = true } - serverconfYaml, marshallError := yaml.Marshal(&serverconfMap) + serverconfYaml, marshallError := yamlMarshal(&serverconfMap) if marshallError != nil { log.Errorf("Error marshalling server.conf.yaml: %v", marshallError) return marshallError @@ -333,14 +344,14 @@ func enableOpenTracingInServerConf() error { } // readServerConfFile returns the content of the server.conf.yaml file in the overrides 
folder -func readServerConfFile() ([]byte, error) { +func readServerConfFileLocal() ([]byte, error) { content, err := ioutil.ReadFile("/home/aceuser/ace-server/overrides/server.conf.yaml") return content, err } // writeServerConfFile writes the yaml content to the server.conf.yaml file in the overrides folder // It creates the file if it doesn't already exist -func writeServerConfFile(content []byte) error { +func writeServerConfFileLocal(content []byte) error { writeError := ioutil.WriteFile("/home/aceuser/ace-server/overrides/server.conf.yaml", content, 0644) if writeError != nil { log.Errorf("Error writing server.conf.yaml: %v", writeError) @@ -384,7 +395,7 @@ func enableAdminsslInServerConf() error { // It returns the updated server.conf.yaml content func addMetricsToServerConf(serverconfContent []byte) ([]byte, error) { serverconfMap := make(map[interface{}]interface{}) - unmarshallError := yaml.Unmarshal([]byte(serverconfContent), &serverconfMap) + unmarshallError := yamlUnmarshal([]byte(serverconfContent), &serverconfMap) if unmarshallError != nil { log.Errorf("Error unmarshalling server.conf.yaml: %v", unmarshallError) return nil, unmarshallError @@ -541,7 +552,7 @@ func addAdminsslToServerConf(serverconfContent []byte) ([]byte, error) { // getConfigurationFromContentServer checks if ACE_CONTENT_SERVER_URL exists. 
If so then it pulls // a bar file from that URL -func getConfigurationFromContentServer() error { +func getConfigurationFromContentServerLocal() error { // ACE_CONTENT_SERVER_URL can contain 1 or more comma separated urls urls := os.Getenv("ACE_CONTENT_SERVER_URL") @@ -609,13 +620,33 @@ func getConfigurationFromContentServer() error { log.Errorf("Error parsing content server url : %v", err) return err } - filename := "/home/aceuser/initial-config/bars/" + path.Base(u.Path) + ".bar" - // temporarily override the bar name with "barfile.bar" if we only have ONE bar file until mq connector is fixed to support any bar name + var filename string if len(urlArray) == 1 { + // temporarily override the bar name with "barfile.bar" if we only have ONE bar file until mq connector is fixed to support any bar name filename = "/home/aceuser/initial-config/bars/barfile.bar" + } else { + // Multiple bar support. Need to loop to check that the file does not already exist + // (case where multiple bars have the same name) + isAvailable := false + count := 0 + for !isAvailable { + if count == 0 { + filename = "/home/aceuser/initial-config/bars/" + path.Base(u.Path) + ".bar" + } else { + filename = "/home/aceuser/initial-config/bars/" + path.Base(u.Path) + "-" + fmt.Sprint(count) + ".bar" + log.Printf("Previous path already in use. 
Testing filename: " + filename) + } + + if _, err := osStat(filename); os.IsNotExist(err) { + log.Printf("No existing file on that path so continuing") + isAvailable = true + } + count++ + } } - log.Printf("Will saving bar as: " + filename) + + log.Printf("Will save bar as: " + filename) file, err := osCreate(filename) if err != nil { @@ -946,13 +977,13 @@ func addforceFlowsHttpsToServerConf(serverconfContent []byte) ([]byte, error) { func generatePassword(length int64) string { var i, e = big.NewInt(length), big.NewInt(10) - bigInt, _ := rand.Int(rand.Reader, i.Exp(e, i, nil) ) + bigInt, _ := rand.Int(rand.Reader, i.Exp(e, i, nil)) return bigInt.String() } func watchForceFlowsHTTPSSecret(password string) *fsnotify.Watcher { - //set up watch on the /home/aceuser/httpsNodeCerts/tls.key file + //set up watch on the /home/aceuser/httpsNodeCerts/tls.key file watcher, err := fsnotify.NewWatcher() if err != nil { log.Errorf("Error creating new watcher for Force Flows to be HTTPS: %v", err) @@ -984,7 +1015,6 @@ func watchForceFlowsHTTPSSecret(password string) *fsnotify.Watcher { } } - case err, ok := <-watcher.Errors: log.Println("error from Force Flows to be HTTPS watcher:", err) if !ok { @@ -1003,13 +1033,13 @@ func localGenerateHTTPSKeystore(privateKeyLocation string, certificateLocation s // create /home/aceuser/ace-server/https-keystore.p12 using: // single /home/aceuser/httpsNodeCerts/tls.key // single /home/aceuser/httpsNodeCerts/tls.crt - + //Script version: openssl pkcs12 -export -in ${certfile} -inkey ${keyfile} -out /home/aceuser/ace-server/https-keystore.p12 -name ${alias} -password pass:${1} 2>&1) // Load the private key file into a rsa.PrivateKey - privateKeyFile, err := ioutil.ReadFile(privateKeyLocation) + privateKeyFile, err := ioutil.ReadFile(privateKeyLocation) if err != nil { - log.Error("Error loading " + privateKeyLocation, err) + log.Error("Error loading "+privateKeyLocation, err) } privateKeyPem, _ := pem.Decode(privateKeyFile) if 
privateKeyPem.Type != "RSA PRIVATE KEY" { @@ -1017,34 +1047,34 @@ func localGenerateHTTPSKeystore(privateKeyLocation string, certificateLocation s } privateKeyPemBytes := privateKeyPem.Bytes parsedPrivateKey, err := x509.ParsePKCS1PrivateKey(privateKeyPemBytes) - if err != nil { - log.Error("Error parsing " + privateKeyLocation + " RSA PRIVATE KEY", err) + if err != nil { + log.Error("Error parsing "+privateKeyLocation+" RSA PRIVATE KEY", err) } // Load the single cert file into a x509.Certificate certificateFile, err := ioutil.ReadFile(certificateLocation) if err != nil { - log.Error("Error loading " + certificateLocation, err) + log.Error("Error loading "+certificateLocation, err) } certificatePem, _ := pem.Decode(certificateFile) if certificatePem.Type != "CERTIFICATE" { - log.Error(certificateLocation +" is not CERTIFICATE type ", certificatePem.Type) + log.Error(certificateLocation+" is not CERTIFICATE type ", certificatePem.Type) } certificatePemBytes := certificatePem.Bytes parsedCertificate, err := x509.ParseCertificate(certificatePemBytes) - if err != nil { - log.Error("Error parsing " + certificateLocation +" CERTIFICATE", err) + if err != nil { + log.Error("Error parsing "+certificateLocation+" CERTIFICATE", err) } // Create Keystore pfxBytes, err := pkcs12.Encode(rand.Reader, parsedPrivateKey, parsedCertificate, []*x509.Certificate{}, password) - if err != nil { - log.Error("Error creating the " + keystoreLocation, err) + if err != nil { + log.Error("Error creating the "+keystoreLocation, err) } // Write out the Keystore 600 (rw- --- ---) err = ioutil.WriteFile(keystoreLocation, pfxBytes, 0600) - if err != nil { + if err != nil { log.Error(err) } } @@ -1057,7 +1087,7 @@ func localPatchHTTPSConnector(uds string) { // HTTP/1.1 200 OK // Content-Length: 0 // Content-Type: application/json - + // use unix domain socket httpc := http.Client{ Transport: &http.Transport{ diff --git a/cmd/runaceserver/integrationserver_internal_test.go 
b/cmd/runaceserver/integrationserver_internal_test.go index 00dd317..9f91abe 100644 --- a/cmd/runaceserver/integrationserver_internal_test.go +++ b/cmd/runaceserver/integrationserver_internal_test.go @@ -16,18 +16,19 @@ limitations under the License. package main import ( - "errors" - "path/filepath" - "net" - "net/http" - "strings" - "time" - "io" - "io/ioutil" - "os" - "testing" - "github.com/ot4i/ace-docker/common/logger" - "github.com/stretchr/testify/assert" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ot4i/ace-docker/common/logger" + "github.com/stretchr/testify/assert" ) var yamlTests = []struct { @@ -361,6 +362,7 @@ func TestGetConfigurationFromContentServer(t *testing.T) { barDirPath := "/home/aceuser/initial-config/bars" var osMkdirRestore = osMkdir var osCreateRestore = osCreate + var osStatRestore = osStat var ioutilReadFileRestore = ioutilReadFile var ioCopyRestore = ioCopy var contentserverGetBARRestore = contentserverGetBAR @@ -393,6 +395,9 @@ func TestGetConfigurationFromContentServer(t *testing.T) { osCreate = func(file string) (*os.File, error) { panic("should be mocked") } + osStat = func(file string) (os.FileInfo, error) { + panic("should be mocked") + } ioCopy = func(target io.Writer, source io.Reader) (int64, error) { panic("should be mocked") } @@ -407,6 +412,7 @@ func TestGetConfigurationFromContentServer(t *testing.T) { var restore = func() { osMkdir = osMkdirRestore osCreate = osCreateRestore + osStat = osStatRestore ioutilReadFile = ioutilReadFileRestore ioCopy = ioCopyRestore contentserverGetBAR = contentserverGetBARRestore @@ -482,6 +488,13 @@ func TestGetConfigurationFromContentServer(t *testing.T) { return nil, nil } + + osStat = func(file string) (os.FileInfo, error) { + // Should not be called + t.Errorf("Should not check if file exist when only single bar URL") + return nil, nil + } + ioutilReadFile = func(cafile string) ([]byte, error) { 
assert.Equal(t, "/home/aceuser/ssl/cacert.pem", cafile) return []byte(dummyCert), nil @@ -564,6 +577,12 @@ func TestGetConfigurationFromContentServer(t *testing.T) { return nil, nil } + osStat = func(file string) (os.FileInfo, error) { + // Should not be called + t.Errorf("Should not check if file exist when only single bar URL") + return nil, nil + } + ioutilReadFile = func(cafile string) ([]byte, error) { assert.Equal(t, "/home/aceuser/ssl/mycustom.pem", cafile) return []byte(dummyCert), nil @@ -617,6 +636,12 @@ func TestGetConfigurationFromContentServer(t *testing.T) { return nil, nil } + osStat = func(file string) (os.FileInfo, error) { + // Should not be called + t.Errorf("Should not check if file exist when only single bar URL") + return nil, nil + } + ioutilReadFile = func(cafile string) ([]byte, error) { assert.Equal(t, "/home/aceuser/ssl/mycustom.pem", cafile) return []byte(dummyCert), nil @@ -711,6 +736,10 @@ func TestGetConfigurationFromContentServer(t *testing.T) { return nil, nil } + osStat = func(file string) (os.FileInfo, error) { + return nil, os.ErrNotExist + } + ioutilReadFile = func(cafile string) ([]byte, error) { assert.Equal(t, "/home/aceuser/ssl/cacert.pem", cafile) return []byte(dummyCert), nil @@ -745,6 +774,81 @@ func TestGetConfigurationFromContentServer(t *testing.T) { assert.Nil(t, errorReturned) }) + + t.Run("Creates multiple files with different names when using multi bar support and the bar file names are all the same", func(t *testing.T) { + + //https://alexdash-ibm-ace-dashboard-prod:3443/v1/directories/CustomerDatabaseV1?userid=fsdjfhksdjfhsd + var barName = "CustomerDatabaseV1" + var contentServerName = "alexdash-ibm-ace-dashboard-prod" + var barAuth = "userid=fsdjfhksdjfhsd" + var barUrlBase = "https://" + contentServerName + ":3443/v1/directories/" + barName + var barUrlFull = barUrlBase + "?" 
+ barAuth + + var barUrl = barUrlFull + "," + barUrlFull + "," + barUrlFull + + testReadCloser := ioutil.NopCloser(strings.NewReader("test")) + + os.Unsetenv("DEFAULT_CONTENT_SERVER") + os.Setenv("ACE_CONTENT_SERVER_URL", barUrl) + os.Unsetenv("ACE_CONTENT_SERVER_NAME") + os.Unsetenv("ACE_CONTENT_SERVER_TOKEN") + os.Setenv("CONTENT_SERVER_CERT", "cacert") + os.Setenv("CONTENT_SERVER_KEY", "cakey") + + osMkdir = func(dirPath string, mode os.FileMode) error { + assert.Equal(t, barDirPath, dirPath) + assert.Equal(t, os.ModePerm, mode) + return nil + } + + createdFiles := map[string]bool{} + osCreateCall := 1 + osCreate = func(file string) (*os.File, error) { + createdFiles[file] = true + if osCreateCall == 1 { + assert.Equal(t, "/home/aceuser/initial-config/bars/"+barName+".bar", file) + } else if osCreateCall == 2 { + assert.Equal(t, "/home/aceuser/initial-config/bars/"+barName+"-1.bar", file) + } else if osCreateCall == 3 { + assert.Equal(t, "/home/aceuser/initial-config/bars/"+barName+"-2.bar", file) + } + osCreateCall = osCreateCall + 1 + return nil, nil + } + + osStat = func(file string) (os.FileInfo, error) { + if createdFiles[file] { + return nil, os.ErrExist + } else { + return nil, os.ErrNotExist + } + } + + ioutilReadFile = func(cafile string) ([]byte, error) { + assert.Equal(t, "/home/aceuser/ssl/cacert.pem", cafile) + return []byte(dummyCert), nil + } + + getBarCall := 1 + contentserverGetBAR = func(url string, serverName string, token string, contentServerCACert []byte, contentServerCert string, contentServerKey string, log logger.LoggerInterface) (io.ReadCloser, error) { + assert.Equal(t, barUrlBase+"?archive=true", url) + assert.Equal(t, contentServerName, serverName) + assert.Equal(t, barAuth, token) + assert.Equal(t, []byte(dummyCert), contentServerCACert) + assert.Equal(t, "cacert", contentServerCert) + assert.Equal(t, "cakey", contentServerKey) + getBarCall = getBarCall + 1 + return testReadCloser, nil + } + + ioCopy = func(target io.Writer, source 
io.Reader) (int64, error) { + return 0, nil + } + + errorReturned := getConfigurationFromContentServer() + + assert.Nil(t, errorReturned) + }) } func TestWatchForceFlowsHTTPSSecret(t *testing.T) { diff --git a/cmd/runaceserver/integrationserver_test.go b/cmd/runaceserver/integrationserver_test.go new file mode 100644 index 0000000..a8afb58 --- /dev/null +++ b/cmd/runaceserver/integrationserver_test.go @@ -0,0 +1,173 @@ +package main + +import ( + "errors" + "os" + "testing" + + "github.com/ot4i/ace-docker/common/logger" + "github.com/stretchr/testify/assert" +) + +func Test_initialIntegrationServerConfig(t *testing.T) { + oldGetConfigurationFromContentServer := getConfigurationFromContentServer + getConfigurationFromContentServer = func() error { + return nil + } + t.Run("Golden path - When initial-config/webusers dir exists we call into ConfigureWebAdminUsers to process users", func(t *testing.T) { + oldCreateSHAServerConfYaml := createSHAServerConfYaml + createSHAServerConfYaml = func() error { + return nil + } + oldConfigureWebAdminUsers := ConfigureWebAdminUsers + ConfigureWebAdminUsers = func(log logger.LoggerInterface) error { + return nil + } + + homedir = "../../internal/webadmin/testdata" + initialConfigDir = "../../internal/webadmin/testdata/initial-config" + err := initialIntegrationServerConfig() + assert.NoError(t, err) + + createSHAServerConfYaml = oldCreateSHAServerConfYaml + ConfigureWebAdminUsers = oldConfigureWebAdminUsers + + }) + + t.Run("When we fail to properly configure WebAdmin users we fail and return error", func(t *testing.T) { + oldCreateSHAServerConfYaml := createSHAServerConfYaml + createSHAServerConfYaml = func() error { + return nil + } + oldConfigureWebAdminUsers := ConfigureWebAdminUsers + ConfigureWebAdminUsers = func(log logger.LoggerInterface) error { + return errors.New("Error processing WebAdmin users") + } + homedir = "../../internal/webadmin/testdata" + initialConfigDir = "../../internal/webadmin/testdata/initial-config" + 
err := initialIntegrationServerConfig() + assert.Error(t, err) + assert.Equal(t, "Error processing WebAdmin users", err.Error()) + + createSHAServerConfYaml = oldCreateSHAServerConfYaml + ConfigureWebAdminUsers = oldConfigureWebAdminUsers + }) + + getConfigurationFromContentServer = oldGetConfigurationFromContentServer +} + +func Test_createSHAServerConfYamlLocal(t *testing.T) { + t.Run("Golden path - Empty file gets populated with the right values", func(t *testing.T) { + + oldReadServerConfFile := readServerConfFile + readServerConfFile = func() ([]byte, error) { + return []byte{}, nil + } + oldWriteServerConfFileLocal := writeServerConfFile + writeServerConfFile = func(content []byte) error { + return nil + } + + err := createSHAServerConfYaml() + assert.NoError(t, err) + readServerConfFile = oldReadServerConfFile + writeServerConfFile = oldWriteServerConfFileLocal + }) + t.Run("Golden path 2 - Populated file gets handled and no errors", func(t *testing.T) { + + oldReadServerConfFile := readServerConfFile + readServerConfFile = func() ([]byte, error) { + file, err := os.ReadFile("../../internal/webadmin/testdata/initial-config/webusers/server.conf.yaml") + if err != nil { + t.Log(err) + t.Fail() + } + return file, nil + } + oldWriteServerConfFileLocal := writeServerConfFile + writeServerConfFile = func(content []byte) error { + return nil + } + oldYamlMarshal := yamlMarshal + yamlMarshal = func(in interface{}) (out []byte, err error) { + return nil, nil + } + + err := createSHAServerConfYaml() + assert.NoError(t, err) + readServerConfFile = oldReadServerConfFile + writeServerConfFile = oldWriteServerConfFileLocal + yamlMarshal = oldYamlMarshal + + }) + t.Run("Error reading server.conf.yaml file", func(t *testing.T) { + oldReadServerConfFile := readServerConfFile + readServerConfFile = func() ([]byte, error) { + return nil, errors.New("Error reading server.conf.yaml") + } + oldWriteServerConfFileLocal := writeServerConfFile + writeServerConfFile = func(content 
[]byte) error { + return nil + } + + err := createSHAServerConfYaml() + assert.Error(t, err) + readServerConfFile = oldReadServerConfFile + writeServerConfFile = oldWriteServerConfFileLocal + }) + t.Run("yaml.Marshal fails to execute properly", func(t *testing.T) { + oldYamlUnmarshal := yamlUnmarshal + yamlUnmarshal = func(in []byte, out interface{}) (err error) { + return errors.New("Error unmarshalling yaml") + } + oldYamlMarshal := yamlMarshal + yamlMarshal = func(in interface{}) (out []byte, err error) { + return nil, nil + } + + err := createSHAServerConfYaml() + assert.Error(t, err) + assert.Equal(t, "Error unmarshalling yaml", err.Error()) + + yamlUnmarshal = oldYamlUnmarshal + yamlMarshal = oldYamlMarshal + }) + t.Run("yaml.Marshal fails to execute properly", func(t *testing.T) { + oldYamlUnmarshal := yamlUnmarshal + yamlUnmarshal = func(in []byte, out interface{}) (err error) { + return nil + } + oldYamlMarshal := yamlMarshal + yamlMarshal = func(in interface{}) (out []byte, err error) { + return nil, errors.New("Error marshalling yaml") + } + + err := createSHAServerConfYaml() + assert.Error(t, err) + assert.Equal(t, "Error marshalling yaml", err.Error()) + + yamlUnmarshal = oldYamlUnmarshal + yamlMarshal = oldYamlMarshal + }) + t.Run("yaml.Marshal fails to execute properly", func(t *testing.T) { + oldYamlUnmarshal := yamlUnmarshal + yamlUnmarshal = func(in []byte, out interface{}) (err error) { + return nil + } + oldYamlMarshal := yamlMarshal + yamlMarshal = func(in interface{}) (out []byte, err error) { + return nil, nil + } + oldWriteServerConfFile := writeServerConfFile + writeServerConfFile = func(content []byte) error { + return errors.New("Error writing server.conf.yaml") + } + err := createSHAServerConfYaml() + assert.Error(t, err) + assert.Equal(t, "Error writing server.conf.yaml", err.Error()) + + yamlUnmarshal = oldYamlUnmarshal + yamlMarshal = oldYamlMarshal + writeServerConfFile = oldWriteServerConfFile + }) +} diff --git 
a/cmd/runaceserver/main.go b/cmd/runaceserver/main.go index 645505c..bd6138f 100644 --- a/cmd/runaceserver/main.go +++ b/cmd/runaceserver/main.go @@ -29,6 +29,7 @@ import ( "github.com/ot4i/ace-docker/internal/metrics" "github.com/ot4i/ace-docker/internal/name" "github.com/ot4i/ace-docker/internal/qmgr" + "github.com/ot4i/ace-docker/internal/trace" ) func doMain() error { @@ -243,6 +244,14 @@ func doMain() error { log.Println("Integration API started") } + log.Println("Starting trace API server") + err = trace.StartServer(log, 7981) + if err != nil { + log.Println("Failed to start trace API server, you will not be able to retrieve trace through the ACE dashboard " + err.Error()) + } else { + log.Println("Trace API server started") + } + // Start reaping zombies from now on. // Start this here, so that we don't reap any sub-processes created // by this process (e.g. for crtmqm or strmqm) diff --git a/experimental/ace-minimal/Dockerfile.alpine-openjdk14 b/experimental/ace-minimal/Dockerfile.alpine-openjdk14 index fcfcc91..6406551 100644 --- a/experimental/ace-minimal/Dockerfile.alpine-openjdk14 +++ b/experimental/ace-minimal/Dockerfile.alpine-openjdk14 @@ -1,4 +1,4 @@ -FROM alpine:3.12 +FROM alpine:3.14 # docker build -t ace-minimal:11.0.0.9-alpine-openjdk14 -f Dockerfile.alpine-openjdk14 . 
diff --git a/experimental/old/ace-alpine-with-jdk/Dockerfile b/experimental/old/ace-alpine-with-jdk/Dockerfile index 89af9a8..4bbffb1 100644 --- a/experimental/old/ace-alpine-with-jdk/Dockerfile +++ b/experimental/old/ace-alpine-with-jdk/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.8 +FROM alpine:3.14 MAINTAINER Trevor Dolby (@tdolby) diff --git a/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk14 b/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk14 index 9bcab08..c4beac4 100644 --- a/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk14 +++ b/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk14 @@ -1,4 +1,4 @@ -FROM alpine:3.12 +FROM alpine:3.14 # docker build --build-arg DOWNLOAD_URL=http://kenya.hursley.uk.ibm.com:52367/ace-11.0.0.9.tar.gz --build-arg PRODUCT_LABEL=ace-11.0.0.9 -t ace-minimal-install:11.0.0.9-alpine-openjdk14 -f Dockerfile.alpine-openjdk14 . diff --git a/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk15 b/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk15 index a2e2656..6c49eda 100644 --- a/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk15 +++ b/experimental/old/ace-minimal-install/Dockerfile.alpine-openjdk15 @@ -1,4 +1,4 @@ -FROM alpine:3.12 +FROM alpine:3.14 # docker build --build-arg DOWNLOAD_URL=http://kenya.hursley.uk.ibm.com:52367/ace-11.0.0.9.tar.gz --build-arg PRODUCT_LABEL=ace-11.0.0.9 -t ace-minimal-install:11.0.0.9-alpine-openjdk15 -f Dockerfile.alpine-openjdk15 . 
diff --git a/experimental/old/ace-minimal-install/Dockerfile.mount-from-host b/experimental/old/ace-minimal-install/Dockerfile.mount-from-host index 548aaa1..c0b50cd 100644 --- a/experimental/old/ace-minimal-install/Dockerfile.mount-from-host +++ b/experimental/old/ace-minimal-install/Dockerfile.mount-from-host @@ -1,4 +1,4 @@ -FROM alpine:3.11 +FROM alpine:3.14 # docker build --build-arg DOWNLOAD_URL=http://kenya.hursley.uk.ibm.com:52367/ace-11.0.0.9.tar.gz --build-arg PRODUCT_LABEL=ace-11.0.0.9 -t ace-minimal-install:11.0.0.9-alpine-openjdk15 -f Dockerfile.alpine-openjdk15 . diff --git a/experimental/old/ace-minimal/Dockerfile b/experimental/old/ace-minimal/Dockerfile index bd7913e..b98c89e 100644 --- a/experimental/old/ace-minimal/Dockerfile +++ b/experimental/old/ace-minimal/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:18.04 +FROM ubuntu:20.04 # Prevent errors about having no terminal when using apt-get ENV DEBIAN_FRONTEND noninteractive diff --git a/experimental/windows/server2019/Dockerfile b/experimental/windows/server2019/Dockerfile index 947787d..161e030 100644 --- a/experimental/windows/server2019/Dockerfile +++ b/experimental/windows/server2019/Dockerfile @@ -1,4 +1,4 @@ -FROM mcr.microsoft.com/windows/servercore:1809 +FROM mcr.microsoft.com/windows/servercore:2009 WORKDIR c:\\tmp COPY ACESetup11.0.16742.4.exe c:\\tmp\\ACESetup.exe diff --git a/go.mod b/go.mod index f03deaa..51ed9b4 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/prometheus/client_golang v1.5.1 github.com/prometheus/client_model v0.2.0 github.com/stretchr/testify v1.4.0 + golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 gopkg.in/yaml.v2 v2.2.8 k8s.io/apimachinery v0.18.1 diff --git a/go.sum b/go.sum index 1cade72..ef804a6 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0 
h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -20,7 +19,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aymerick/raymond v1.1.0 h1:phuNN2s67eI/HtO8CrvqFcdR2JP+BtkGJZ9n692Hr2Y= github.com/aymerick/raymond v2.0.2+incompatible h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0= github.com/aymerick/raymond v2.0.2+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -36,7 +34,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -74,6 +71,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -106,8 +104,10 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -153,6 +153,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -165,7 +166,6 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -225,10 +225,12 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -237,6 +239,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= diff --git a/internal/metrics/update.go b/internal/metrics/update.go index 6bdb059..ac35b16 100644 --- a/internal/metrics/update.go +++ b/internal/metrics/update.go @@ -18,576 +18,575 @@ limitations under the License. 
package metrics import ( - "crypto/tls" - "crypto/x509" - "errors" - "io/ioutil" - "flag" - "fmt" - "math" - "net" - "net/url" - "net/http" - "net/http/cookiejar" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/ot4i/ace-docker/common/logger" - - "github.com/gorilla/websocket" - "github.com/prometheus/client_golang/prometheus" + "crypto/tls" + "crypto/x509" + "errors" + "flag" + "fmt" + "io/ioutil" + "math" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/ot4i/ace-docker/common/logger" + + "github.com/gorilla/websocket" + "github.com/prometheus/client_golang/prometheus" ) var ( - addr = flag.String("addr", "localhost:7600", "http service address") - startChannel = make(chan bool) - stopChannel = make(chan bool, 2) - stopping = false - requestChannel = make(chan bool) - responseChannel = make(chan *MetricsMap) - statisticsChannel = make(chan StatisticsDataStruct, 10) // Block on writing to channel if we already have 10 queued so we don't retrieve any more + addr = flag.String("addr", "localhost:7600", "http service address") + startChannel = make(chan bool) + stopChannel = make(chan bool, 2) + stopping = false + requestChannel = make(chan bool) + responseChannel = make(chan *MetricsMap) + statisticsChannel = make(chan StatisticsDataStruct, 10) // Block on writing to channel if we already have 10 queued so we don't retrieve any more ) type MetricsMap struct { - sync.Mutex - internal map[string]*metricData + sync.Mutex + internal map[string]*metricData } func NewMetricsMap() *MetricsMap { - return &MetricsMap{ - internal: make(map[string]*metricData), - } + return &MetricsMap{ + internal: make(map[string]*metricData), + } } type Metric struct { - labels prometheus.Labels - value float64 + labels prometheus.Labels + value float64 } type metricData struct { - name string - description string - values map[string]*Metric - metricType MetricType - metricUnits MetricUnits - 
metricLevel MetricLevel + name string + description string + values map[string]*Metric + metricType MetricType + metricUnits MetricUnits + metricLevel MetricLevel } /* Normalise returns a float64 representation of the metric value normalised to a base metric type (seconds, bytes, etc.) */ func (md *metricData) Normalise(value int) float64 { - f := float64(value) + f := float64(value) - if f < 0 { - f = 0 - } + if f < 0 { + f = 0 + } - // Convert microseconds to seconds - if md.metricUnits == Microseconds { - f = f / 1000000 - } + // Convert microseconds to seconds + if md.metricUnits == Microseconds { + f = f / 1000000 + } - // Convert megabytes to bytes - if md.metricUnits == MegaBytes { - f = f * 1024 * 1024 - } + // Convert megabytes to bytes + if md.metricUnits == MegaBytes { + f = f * 1024 * 1024 + } - return f + return f } func ReadStatistics(log logger.LoggerInterface) { - // Check if the admin server is secure so we know whether to connect with wss or ws - aceAdminServerSecurity := os.Getenv("ACE_ADMIN_SERVER_SECURITY") - if aceAdminServerSecurity == "" { - log.Printf("Can't tell if ace admin server security is enabled defaulting to false") - aceAdminServerSecurity = "false" - } else { - log.Printf("ACE_ADMIN_SERVER_SECURITY is %s", aceAdminServerSecurity) - } - - var firstConnect = true - - for { - if stopping { - // Stopping will trigger a read error on the c.ReadJSON call and re-entry into this loop, - // but we want to exit this function when that happens - return - } - - var c *websocket.Conn - var dialError error - - // Use wss with TLS if using the admin server is secured - if aceAdminServerSecurity == "true" { - adminServerCACert := os.Getenv("ACE_ADMIN_SERVER_CA") - - caCertPool := x509.NewCertPool() - if stat, err := os.Stat(adminServerCACert); err == nil && stat.IsDir() { - // path is a directory load all certs - log.Printf("Using CA Certificate folder %s", adminServerCACert) - filepath.Walk(adminServerCACert, func(cert string, info os.FileInfo, 
err error) error { - if (strings.HasSuffix(cert, "crt.pem")) { + // Check if the admin server is secure so we know whether to connect with wss or ws + aceAdminServerSecurity := os.Getenv("ACE_ADMIN_SERVER_SECURITY") + if aceAdminServerSecurity == "" { + log.Printf("Can't tell if ace admin server security is enabled defaulting to false") + aceAdminServerSecurity = "false" + } else { + log.Printf("ACE_ADMIN_SERVER_SECURITY is %s", aceAdminServerSecurity) + } + + var firstConnect = true + + for { + if stopping { + // Stopping will trigger a read error on the c.ReadJSON call and re-entry into this loop, + // but we want to exit this function when that happens + return + } + + var c *websocket.Conn + var dialError error + + // Use wss with TLS if using the admin server is secured + if aceAdminServerSecurity == "true" { + adminServerCACert := os.Getenv("ACE_ADMIN_SERVER_CA") + + caCertPool := x509.NewCertPool() + if stat, err := os.Stat(adminServerCACert); err == nil && stat.IsDir() { + // path is a directory load all certs + log.Printf("Using CA Certificate folder %s", adminServerCACert) + filepath.Walk(adminServerCACert, func(cert string, info os.FileInfo, err error) error { + if strings.HasSuffix(cert, "crt.pem") { log.Printf("Adding Certificate %s to CA pool", cert) binaryCert, err := ioutil.ReadFile(cert) if err != nil { - log.Errorf("Error reading CA Certificate %s", err) - } + log.Errorf("Error reading CA Certificate %s", err) + } ok := caCertPool.AppendCertsFromPEM(binaryCert) if !ok { - log.Errorf("Failed to parse Certificate %s", cert) + log.Errorf("Failed to parse Certificate %s", cert) } } - return nil - }) - } else { - log.Printf("Using CA Certificate file %s", adminServerCACert) - caCert, err := ioutil.ReadFile(adminServerCACert) - if err != nil { - log.Errorf("Error reading CA Certificate %s", err) - return - } - ok := caCertPool.AppendCertsFromPEM(caCert) - if !ok { - log.Errorf("failed to parse root CA Certificate") - } - } - - // Read the key/ cert pair 
to create tls certificate - adminServerCert := os.Getenv("ACE_ADMIN_SERVER_CERT") - adminServerKey := os.Getenv("ACE_ADMIN_SERVER_KEY") - adminServerCerts, err := tls.LoadX509KeyPair(adminServerCert, adminServerKey) - if err != nil { - if ( adminServerCert != "" && adminServerKey != "" ) { - log.Errorf("Error reading TLS Certificates: %s", err) - return - } - } else { - log.Printf("Using provided cert and key for mutual auth") - } - - aceAdminServerName := os.Getenv("ACE_ADMIN_SERVER_NAME") - if aceAdminServerName == "" { - log.Printf("No ace admin server name available") - return - } else { - log.Printf("ACE_ADMIN_SERVER_NAME is %s", aceAdminServerName) - } - - u := url.URL{Scheme: "wss", Host: *addr, Path: "/"} - log.Printf("Connecting to %s for statistics gathering", u.String()) - d := websocket.Dialer{ - TLSClientConfig: &tls.Config{ - RootCAs: caCertPool, - Certificates: []tls.Certificate{adminServerCerts}, - ServerName: aceAdminServerName, - }, - } - - // Retrieve session if the webusers exist - contentBytes, err := ioutil.ReadFile("/home/aceuser/initial-config/webusers/admin-users.txt") - if err != nil { - log.Printf("Cannot find admin-users.txt file, not retrieving session cookie") - } else { - log.Printf("Using provided webusers/admin-users.txt for basic auth session cookie") - userPassword := strings.Fields(string(contentBytes)) - username := userPassword[0] - password := userPassword[1] + return nil + }) + } else { + log.Printf("Using CA Certificate file %s", adminServerCACert) + caCert, err := ioutil.ReadFile(adminServerCACert) + if err != nil { + log.Errorf("Error reading CA Certificate %s", err) + return + } + ok := caCertPool.AppendCertsFromPEM(caCert) + if !ok { + log.Errorf("failed to parse root CA Certificate") + } + } + + // Read the key/ cert pair to create tls certificate + adminServerCert := os.Getenv("ACE_ADMIN_SERVER_CERT") + adminServerKey := os.Getenv("ACE_ADMIN_SERVER_KEY") + adminServerCerts, err := tls.LoadX509KeyPair(adminServerCert, 
adminServerKey) + if err != nil { + if adminServerCert != "" && adminServerKey != "" { + log.Errorf("Error reading TLS Certificates: %s", err) + return + } + } else { + log.Printf("Using provided cert and key for mutual auth") + } + + aceAdminServerName := os.Getenv("ACE_ADMIN_SERVER_NAME") + if aceAdminServerName == "" { + log.Printf("No ace admin server name available") + return + } else { + log.Printf("ACE_ADMIN_SERVER_NAME is %s", aceAdminServerName) + } + + u := url.URL{Scheme: "wss", Host: *addr, Path: "/"} + log.Printf("Connecting to %s for statistics gathering", u.String()) + d := websocket.Dialer{ + TLSClientConfig: &tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{adminServerCerts}, + ServerName: aceAdminServerName, + }, + } + + // Retrieve session if the webusers exist + contentBytes, err := ioutil.ReadFile("/home/aceuser/initial-config/webusers/admin-users.txt") + if err != nil { + log.Printf("Cannot find admin-users.txt file, not retrieving session cookie") + } else { + log.Printf("Using provided webusers/admin-users.txt for basic auth session cookie") + userPassword := strings.Fields(string(contentBytes)) + username := userPassword[0] + password := userPassword[1] var conn *tls.Conn - httpUrl := url.URL{Scheme: "https", Host: *addr, Path: "/"} + httpUrl := url.URL{Scheme: "https", Host: *addr, Path: "/"} tlsConfig := &tls.Config{ - RootCAs: caCertPool, - Certificates: []tls.Certificate{adminServerCerts}, - ServerName: aceAdminServerName, - } - client := &http.Client{ - Transport: &http.Transport{ - DialTLS: func(network, addr string) (net.Conn, error) { - conn, err = tls.Dial(network, addr, tlsConfig) - return conn, err - }, - }, - - } - req, _ := http.NewRequest("GET", httpUrl.String(), nil) - req.SetBasicAuth(username, password) - resp, err := client.Do(req) - if err != nil{ - log.Errorf("Error retrieving session: %s", err) - } - - jar, _ := cookiejar.New(nil) - if (resp != nil) { - cookies := resp.Cookies() - 
jar.SetCookies(&httpUrl, cookies) - } - - if (jar.Cookies(&httpUrl) != nil) { - log.Printf("Connecting to %s using session cookie and SSL", u.String()) - d.Jar = jar - } else { - log.Printf("Connecting to %s with SSL", u.String()) - } - } - - // Create the websocket connection - c, _, dialError = d.Dial(u.String(), http.Header{"Origin": {u.String()}}) - } else { - wsUrl := url.URL{Scheme: "ws", Host: *addr, Path: "/"} - log.Printf("Connecting to %s for statistics", wsUrl.String()) - - d := websocket.DefaultDialer - - // Retrieve session if the webusers exist - contentBytes, err := ioutil.ReadFile("/home/aceuser/initial-config/webusers/admin-users.txt") - if err != nil { - log.Printf("Cannot find admin-users.txt file, not retrieving session") - } else { - log.Printf("Using provided webusers/admin-users.txt for basic auth session cookie") - userPassword := strings.Fields(string(contentBytes)) - username := userPassword[0] - password := userPassword[1] - - httpUrl := url.URL{Scheme: "http", Host: *addr, Path: "/"} - client := &http.Client{} - req, _ := http.NewRequest("GET", httpUrl.String(), nil) - req.SetBasicAuth(username, password) - resp, err := client.Do(req) - if err != nil{ - log.Errorf("Error retrieving session: %s", err) - } - - jar, _ := cookiejar.New(nil) - if (resp != nil) { - cookies := resp.Cookies() - jar.SetCookies(&httpUrl, cookies) - } - - if (jar.Cookies(&httpUrl) != nil) { - log.Printf("Connecting to %s using session cookie", wsUrl.String()) - d.Jar = jar - } else { - log.Printf("Connecting to %s without using session cookie", wsUrl.String()) - } - } - // Create the websocket connection - c, _, dialError = d.Dial(wsUrl.String(), http.Header{"Origin": {wsUrl.String()}}) - } - - if dialError == nil { - if firstConnect { - firstConnect = false - startChannel <- true - } - - defer c.Close() - - // Loop reading from websocket and put messages on the statistics statisticsChannel - // End the loop and reconnect if there is an error reading from the 
websocket - var readError error - for readError == nil { - var m StatisticsDataStruct - - readError = c.ReadJSON(&m) - if readError == nil { - statisticsChannel <- m - } - } - } else { - log.Errorf("Error calling ace admin server webservice endpoint %s", dialError) - log.Println("If this repeats then check you have assigned enough memory to your Pod and you aren't running out of memory") - log.Println("Sleeping for 5 seconds before retrying to connect to metrics...") - time.Sleep(5 * time.Second) - } - } + RootCAs: caCertPool, + Certificates: []tls.Certificate{adminServerCerts}, + ServerName: aceAdminServerName, + } + client := &http.Client{ + Transport: &http.Transport{ + DialTLS: func(network, addr string) (net.Conn, error) { + conn, err = tls.Dial(network, addr, tlsConfig) + return conn, err + }, + }, + } + req, _ := http.NewRequest("GET", httpUrl.String(), nil) + req.SetBasicAuth(username, password) + resp, err := client.Do(req) + if err != nil { + log.Errorf("Error retrieving session: %s", err) + } + + jar, _ := cookiejar.New(nil) + if resp != nil { + cookies := resp.Cookies() + jar.SetCookies(&httpUrl, cookies) + } + + if jar.Cookies(&httpUrl) != nil { + log.Printf("Connecting to %s using session cookie and SSL", u.String()) + d.Jar = jar + } else { + log.Printf("Connecting to %s with SSL", u.String()) + } + } + + // Create the websocket connection + c, _, dialError = d.Dial(u.String(), http.Header{"Origin": {u.String()}}) + } else { + wsUrl := url.URL{Scheme: "ws", Host: *addr, Path: "/"} + log.Printf("Connecting to %s for statistics", wsUrl.String()) + + d := websocket.DefaultDialer + + // Retrieve session if the webusers exist + contentBytes, err := ioutil.ReadFile("/home/aceuser/initial-config/webusers/admin-users.txt") + if err != nil { + log.Printf("Cannot find admin-users.txt file, not retrieving session") + } else { + log.Printf("Using provided webusers/admin-users.txt for basic auth session cookie") + userPassword := 
strings.Fields(string(contentBytes)) + username := userPassword[0] + password := userPassword[1] + + httpUrl := url.URL{Scheme: "http", Host: *addr, Path: "/"} + client := &http.Client{} + req, _ := http.NewRequest("GET", httpUrl.String(), nil) + req.SetBasicAuth(username, password) + resp, err := client.Do(req) + if err != nil { + log.Errorf("Error retrieving session: %s", err) + } + + jar, _ := cookiejar.New(nil) + if resp != nil { + cookies := resp.Cookies() + jar.SetCookies(&httpUrl, cookies) + } + + if jar.Cookies(&httpUrl) != nil { + log.Printf("Connecting to %s using session cookie", wsUrl.String()) + d.Jar = jar + } else { + log.Printf("Connecting to %s without using session cookie", wsUrl.String()) + } + } + // Create the websocket connection + c, _, dialError = d.Dial(wsUrl.String(), http.Header{"Origin": {wsUrl.String()}}) + } + + if dialError == nil { + if firstConnect { + firstConnect = false + startChannel <- true + } + + defer c.Close() + + // Loop reading from websocket and put messages on the statistics statisticsChannel + // End the loop and reconnect if there is an error reading from the websocket + var readError error + for readError == nil { + var m StatisticsDataStruct + + readError = c.ReadJSON(&m) + if readError == nil { + statisticsChannel <- m + } + } + } else { + log.Errorf("Error calling ace admin server webservice endpoint %s", dialError) + log.Println("If this repeats then check you have assigned enough memory to your Pod and you aren't running out of memory") + log.Println("Sleeping for 5 seconds before retrying to connect to metrics...") + time.Sleep(5 * time.Second) + } + } } // processMetrics processes publications of metric data and handles describe/collect/stop requests func processMetrics(log logger.LoggerInterface, serverName string) { - log.Println("Processing metrics...") - - metrics := initialiseMetrics(log) - - go ReadStatistics(log) - - // Handle update/describe/collect/stop requests - for { - select { - case m := 
<-statisticsChannel: - newMetrics, parseError := parseMetrics(log, &m) - - if parseError != nil { - log.Println("Parse Error:", parseError) - } else { - updateMetrics(log, metrics, newMetrics) - } - case <-requestChannel: - responseChannel <- metrics - case <-stopChannel: - log.Println("Stopping metrics gathering") - stopping = true - return - } - } + log.Println("Processing metrics...") + + metrics := initialiseMetrics(log) + + go ReadStatistics(log) + + // Handle update/describe/collect/stop requests + for { + select { + case m := <-statisticsChannel: + newMetrics, parseError := parseMetrics(log, &m) + + if parseError != nil { + log.Println("Parse Error:", parseError) + } else { + updateMetrics(log, metrics, newMetrics) + } + case <-requestChannel: + responseChannel <- metrics + case <-stopChannel: + log.Println("Stopping metrics gathering") + stopping = true + return + } + } } // initialiseMetrics sets initial details for all available metrics func initialiseMetrics(log logger.LoggerInterface) *MetricsMap { - metrics := NewMetricsMap() - msgFlowMetricNamesMap, msgFlowNodeMetricNamesMap := generateMetricNamesMap() - - for k, v := range msgFlowMetricNamesMap { - if v.enabled { - // Set metric details - metric := metricData{ - name: v.name, - description: v.description, - metricType: v.metricType, - metricUnits: v.metricUnits, - metricLevel: v.metricLevel, - } - metric.values = make(map[string]*Metric) - - // Add metric - metrics.internal[k] = &metric - } - } - - for k, v := range msgFlowNodeMetricNamesMap { - if v.enabled { - // Set metric details - metric := metricData{ - name: v.name, - description: v.description, - metricType: v.metricType, - metricUnits: v.metricUnits, - metricLevel: v.metricLevel, - } - metric.values = make(map[string]*Metric) - - // Add metric - metrics.internal[k] = &metric - } - } - - jvmResourceMetricNamesMap := generateResourceMetricNamesMap() - - for k, v := range jvmResourceMetricNamesMap { - if v.enabled { - // Set metric details - 
metric := metricData{ - name: v.name, - description: v.description, - metricType: v.metricType, - metricUnits: v.metricUnits, - metricLevel: v.metricLevel, - } - metric.values = make(map[string]*Metric) - - // Add metric - metrics.internal[k] = &metric - } - } - - return metrics + metrics := NewMetricsMap() + msgFlowMetricNamesMap, msgFlowNodeMetricNamesMap := generateMetricNamesMap() + + for k, v := range msgFlowMetricNamesMap { + if v.enabled { + // Set metric details + metric := metricData{ + name: v.name, + description: v.description, + metricType: v.metricType, + metricUnits: v.metricUnits, + metricLevel: v.metricLevel, + } + metric.values = make(map[string]*Metric) + + // Add metric + metrics.internal[k] = &metric + } + } + + for k, v := range msgFlowNodeMetricNamesMap { + if v.enabled { + // Set metric details + metric := metricData{ + name: v.name, + description: v.description, + metricType: v.metricType, + metricUnits: v.metricUnits, + metricLevel: v.metricLevel, + } + metric.values = make(map[string]*Metric) + + // Add metric + metrics.internal[k] = &metric + } + } + + jvmResourceMetricNamesMap := generateResourceMetricNamesMap() + + for k, v := range jvmResourceMetricNamesMap { + if v.enabled { + // Set metric details + metric := metricData{ + name: v.name, + description: v.description, + metricType: v.metricType, + metricUnits: v.metricUnits, + metricLevel: v.metricLevel, + } + metric.values = make(map[string]*Metric) + + // Add metric + metrics.internal[k] = &metric + } + } + + return metrics } func parseMetrics(log logger.LoggerInterface, m *StatisticsDataStruct) (*MetricsMap, error) { - if m.Event == ResourceStatisticsData { - return parseResourceMetrics(log, m) - } else if m.Event == AccountingAndStatisticsData { - return parseAccountingMetrics(log, m) - } else { - return nil, fmt.Errorf("Unable to parse data with event: %d", m.Event) - } + if m.Event == ResourceStatisticsData { + return parseResourceMetrics(log, m) + } else if m.Event == 
AccountingAndStatisticsData { + return parseAccountingMetrics(log, m) + } else { + return nil, fmt.Errorf("Unable to parse data with event: %d", m.Event) + } } func parseAccountingMetrics(log logger.LoggerInterface, m *StatisticsDataStruct) (*MetricsMap, error) { - parsedMetrics := NewMetricsMap() - - msgFlowMetricNamesMap, msgFlowNodeMetricNamesMap := generateMetricNamesMap() - - accountingOrigin := m.Data.WMQIStatisticsAccounting.MessageFlow.AccountingOrigin - serverName := m.Data.WMQIStatisticsAccounting.MessageFlow.ExecutionGroupName - applicationName := m.Data.WMQIStatisticsAccounting.MessageFlow.ApplicationName - msgflowName := m.Data.WMQIStatisticsAccounting.MessageFlow.MessageFlowName - - if msgflowName == "" { - err := errors.New("parse error - no message flow name in statistics") - return parsedMetrics, err - } - - flowValuesMap := map[string]int{ - "MsgFlow/TotalElapsedTime": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalElapsedTime, - "MsgFlow/MaximumElapsedTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumElapsedTime, - "MsgFlow/MinimumElapsedTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumElapsedTime, - "MsgFlow/TotalCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalCPUTime, - "MsgFlow/MaximumCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumCPUTime, - "MsgFlow/MinimumCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumCPUTime, - "MsgFlow/TotalSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalSizeOfInputMessages, - "MsgFlow/MaximumSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumSizeOfInputMessages, - "MsgFlow/MinimumSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumSizeOfInputMessages, - "MsgFlow/TotalInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalInputMessages, - "MsgFlow/TotalCPUTimeWaiting": m.Data.WMQIStatisticsAccounting.MessageFlow.CPUTimeWaitingForInputMessage, - "MsgFlow/TotalElapsedTimeWaiting": 
m.Data.WMQIStatisticsAccounting.MessageFlow.ElapsedTimeWaitingForInputMessage, - "MsgFlow/NumberOfThreadsInPool": m.Data.WMQIStatisticsAccounting.MessageFlow.NumberOfThreadsInPool, - "MsgFlow/TimesMaximumNumberOfThreadsReached": m.Data.WMQIStatisticsAccounting.MessageFlow.TimesMaximumNumberOfThreadsReached, - "MsgFlow/TotalNumberOfMQErrors": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfMQErrors, - "MsgFlow/TotalNumberOfMessagesWithErrors": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfMessagesWithErrors, - "MsgFlow/TotalNumberOfErrorsProcessingMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfErrorsProcessingMessages, - "MsgFlow/TotalNumberOfTimeOutsWaitingForRepliesToAggregateMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfTimeOutsWaitingForRepliesToAggregateMessages, - "MsgFlow/TotalNumberOfCommits": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfCommits, - "MsgFlow/TotalNumberOfBackouts": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfBackouts, - } - - /* - Process flow level accounting and statistics data - */ - for k, v := range flowValuesMap { - metricDesc := msgFlowMetricNamesMap[k] - if metricDesc.enabled { - metric := metricData{ - name: metricDesc.name, - description: metricDesc.description, - metricType: metricDesc.metricType, - metricUnits: metricDesc.metricUnits, - metricLevel: metricDesc.metricLevel, - } - metric.values = make(map[string]*Metric) - metric.values[accountingOrigin+"_"+applicationName+"_"+msgflowName] = &Metric{labels: prometheus.Labels{msgflowPrefix: msgflowName, serverLabel: serverName, applicationLabel: applicationName, originLabel: accountingOrigin}, value: metric.Normalise(v)} - parsedMetrics.internal[k] = &metric - } - } - - /* - Process node level accounting and statistics data - */ - for k, v := range msgFlowNodeMetricNamesMap { - - if v.enabled { - metric := metricData{ - name: v.name, - description: v.description, - metricType: v.metricType, - 
metricUnits: v.metricUnits, - metricLevel: v.metricLevel, - } - metric.values = make(map[string]*Metric) - - for _, node := range m.Data.WMQIStatisticsAccounting.Nodes { - nodeValuesMap := map[string]int{ - "MsgFlowNode/TotalElapsedTime": node.TotalElapsedTime, - "MsgFlowNode/MaximumElapsedTime": node.MaximumElapsedTime, - "MsgFlowNode/MinimumElapsedTime": node.MinimumElapsedTime, - "MsgFlowNode/TotalCpuTime": node.TotalCPUTime, - "MsgFlowNode/MaximumCpuTime": node.MaximumCPUTime, - "MsgFlowNode/MinimumCpuTime": node.MinimumCPUTime, - "MsgFlowNode/TotalInvocations": node.CountOfInvocations, - "MsgFlowNode/InputTerminals": node.NumberOfInputTerminals, - "MsgFlowNode/OutputTerminals": node.NumberOfOutputTerminals, - } - msgflownodeName := node.Label - msgflownodeType := node.Type - - metric.values[accountingOrigin+"_"+applicationName+"_"+msgflowName+"_"+msgflownodeName] = &Metric{labels: prometheus.Labels{msgflownodeLabel: msgflownodeName, msgflownodeTypeLabel: msgflownodeType, msgflowLabel: msgflowName, serverLabel: serverName, applicationLabel: applicationName, originLabel: accountingOrigin}, value: metric.Normalise(nodeValuesMap[k])} - } - parsedMetrics.internal[k] = &metric - } - } - - return parsedMetrics, nil + parsedMetrics := NewMetricsMap() + + msgFlowMetricNamesMap, msgFlowNodeMetricNamesMap := generateMetricNamesMap() + + accountingOrigin := m.Data.WMQIStatisticsAccounting.MessageFlow.AccountingOrigin + serverName := m.Data.WMQIStatisticsAccounting.MessageFlow.ExecutionGroupName + applicationName := m.Data.WMQIStatisticsAccounting.MessageFlow.ApplicationName + msgflowName := m.Data.WMQIStatisticsAccounting.MessageFlow.MessageFlowName + + if msgflowName == "" { + err := errors.New("parse error - no message flow name in statistics") + return parsedMetrics, err + } + + flowValuesMap := map[string]int{ + "MsgFlow/TotalElapsedTime": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalElapsedTime, + "MsgFlow/MaximumElapsedTime": 
m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumElapsedTime, + "MsgFlow/MinimumElapsedTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumElapsedTime, + "MsgFlow/TotalCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalCPUTime, + "MsgFlow/MaximumCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumCPUTime, + "MsgFlow/MinimumCpuTime": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumCPUTime, + "MsgFlow/TotalSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalSizeOfInputMessages, + "MsgFlow/MaximumSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.MaximumSizeOfInputMessages, + "MsgFlow/MinimumSizeOfInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.MinimumSizeOfInputMessages, + "MsgFlow/TotalInputMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalInputMessages, + "MsgFlow/TotalCPUTimeWaiting": m.Data.WMQIStatisticsAccounting.MessageFlow.CPUTimeWaitingForInputMessage, + "MsgFlow/TotalElapsedTimeWaiting": m.Data.WMQIStatisticsAccounting.MessageFlow.ElapsedTimeWaitingForInputMessage, + "MsgFlow/NumberOfThreadsInPool": m.Data.WMQIStatisticsAccounting.MessageFlow.NumberOfThreadsInPool, + "MsgFlow/TimesMaximumNumberOfThreadsReached": m.Data.WMQIStatisticsAccounting.MessageFlow.TimesMaximumNumberOfThreadsReached, + "MsgFlow/TotalNumberOfMQErrors": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfMQErrors, + "MsgFlow/TotalNumberOfMessagesWithErrors": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfMessagesWithErrors, + "MsgFlow/TotalNumberOfErrorsProcessingMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfErrorsProcessingMessages, + "MsgFlow/TotalNumberOfTimeOutsWaitingForRepliesToAggregateMessages": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfTimeOutsWaitingForRepliesToAggregateMessages, + "MsgFlow/TotalNumberOfCommits": m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfCommits, + "MsgFlow/TotalNumberOfBackouts": 
m.Data.WMQIStatisticsAccounting.MessageFlow.TotalNumberOfBackouts, + } + + /* + Process flow level accounting and statistics data + */ + for k, v := range flowValuesMap { + metricDesc := msgFlowMetricNamesMap[k] + if metricDesc.enabled { + metric := metricData{ + name: metricDesc.name, + description: metricDesc.description, + metricType: metricDesc.metricType, + metricUnits: metricDesc.metricUnits, + metricLevel: metricDesc.metricLevel, + } + metric.values = make(map[string]*Metric) + metric.values[accountingOrigin+"_"+applicationName+"_"+msgflowName] = &Metric{labels: prometheus.Labels{msgflowPrefix: msgflowName, serverLabel: serverName, applicationLabel: applicationName, originLabel: accountingOrigin}, value: metric.Normalise(v)} + parsedMetrics.internal[k] = &metric + } + } + + /* + Process node level accounting and statistics data + */ + for k, v := range msgFlowNodeMetricNamesMap { + + if v.enabled { + metric := metricData{ + name: v.name, + description: v.description, + metricType: v.metricType, + metricUnits: v.metricUnits, + metricLevel: v.metricLevel, + } + metric.values = make(map[string]*Metric) + + for _, node := range m.Data.WMQIStatisticsAccounting.Nodes { + nodeValuesMap := map[string]int{ + "MsgFlowNode/TotalElapsedTime": node.TotalElapsedTime, + "MsgFlowNode/MaximumElapsedTime": node.MaximumElapsedTime, + "MsgFlowNode/MinimumElapsedTime": node.MinimumElapsedTime, + "MsgFlowNode/TotalCpuTime": node.TotalCPUTime, + "MsgFlowNode/MaximumCpuTime": node.MaximumCPUTime, + "MsgFlowNode/MinimumCpuTime": node.MinimumCPUTime, + "MsgFlowNode/TotalInvocations": node.CountOfInvocations, + "MsgFlowNode/InputTerminals": node.NumberOfInputTerminals, + "MsgFlowNode/OutputTerminals": node.NumberOfOutputTerminals, + } + msgflownodeName := node.Label + msgflownodeType := node.Type + + metric.values[accountingOrigin+"_"+applicationName+"_"+msgflowName+"_"+msgflownodeName] = &Metric{labels: prometheus.Labels{msgflownodeLabel: msgflownodeName, msgflownodeTypeLabel: 
msgflownodeType, msgflowLabel: msgflowName, serverLabel: serverName, applicationLabel: applicationName, originLabel: accountingOrigin}, value: metric.Normalise(nodeValuesMap[k])} + } + parsedMetrics.internal[k] = &metric + } + } + + return parsedMetrics, nil } func parseResourceMetrics(log logger.LoggerInterface, m *StatisticsDataStruct) (*MetricsMap, error) { - parsedResourceMetrics := NewMetricsMap() - - serverName := m.Data.ResourceStatistics.ExecutionGroupName - - for _, v := range m.Data.ResourceStatistics.ResourceType { - switch v.Name { - case "JVM": - jvmData := NewJVMData(v.ResourceIdentifier) - - jvmResourceMetricNamesMap := generateResourceMetricNamesMap() - - jvmValuesMap := map[string]int{ - "JVM/Summary/InitialMemoryInMB": jvmData.SummaryInitial, - "JVM/Summary/UsedMemoryInMB": jvmData.SummaryUsed, - "JVM/Summary/CommittedMemoryInMB": jvmData.SummaryCommitted, - "JVM/Summary/MaxMemoryInMB": jvmData.SummaryMax, - "JVM/Summary/CumulativeGCTimeInSeconds": jvmData.SummaryGCTime, - "JVM/Summary/CumulativeNumberOfGCCollections": jvmData.SummaryGCCount, - "JVM/Heap/InitialMemoryInMB": jvmData.HeapInitial, - "JVM/Heap/UsedMemoryInMB": jvmData.HeapUsed, - "JVM/Heap/CommittedMemoryInMB": jvmData.HeapCommitted, - "JVM/Heap/MaxMemoryInMB": jvmData.HeapMax, - "JVM/Native/InitialMemoryInMB": jvmData.NativeInitial, - "JVM/Native/UsedMemoryInMB": jvmData.NativeUsed, - "JVM/Native/CommittedMemoryInMB": jvmData.NativeCommitted, - "JVM/Native/MaxMemoryInMB": jvmData.NativeMax, - "JVM/ScavengerGC/CumulativeGCTimeInSeconds": jvmData.ScavengerGCTime, - "JVM/ScavengerGC/CumulativeNumberOfGCCollections": jvmData.ScavengerGCCount, - "JVM/GlobalGC/CumulativeGCTimeInSeconds": jvmData.GlobalGCTime, - "JVM/GlobalGC/CumulativeNumberOfGCCollections": jvmData.GlobalGCCount, - } - - for metricKey, metricDesc := range jvmResourceMetricNamesMap { - if metricDesc.enabled { - metric := metricData{ - name: metricDesc.name, - description: metricDesc.description, - metricType: 
metricDesc.metricType, - metricUnits: metricDesc.metricUnits, - metricLevel: metricDesc.metricLevel, - } - metric.values = make(map[string]*Metric) - metric.values[metricKey] = &Metric{labels: prometheus.Labels{serverLabel: serverName}, value: metric.Normalise(jvmValuesMap[metricKey])} - parsedResourceMetrics.internal[metricKey] = &metric - } - } - default: - //TODO: Support other resource statistic types - } - } - - return parsedResourceMetrics, nil + parsedResourceMetrics := NewMetricsMap() + + serverName := m.Data.ResourceStatistics.ExecutionGroupName + + for _, v := range m.Data.ResourceStatistics.ResourceType { + switch v.Name { + case "JVM": + jvmData := NewJVMData(v.ResourceIdentifier) + + jvmResourceMetricNamesMap := generateResourceMetricNamesMap() + + jvmValuesMap := map[string]int{ + "JVM/Summary/InitialMemoryInMB": jvmData.SummaryInitial, + "JVM/Summary/UsedMemoryInMB": jvmData.SummaryUsed, + "JVM/Summary/CommittedMemoryInMB": jvmData.SummaryCommitted, + "JVM/Summary/MaxMemoryInMB": jvmData.SummaryMax, + "JVM/Summary/CumulativeGCTimeInSeconds": jvmData.SummaryGCTime, + "JVM/Summary/CumulativeNumberOfGCCollections": jvmData.SummaryGCCount, + "JVM/Heap/InitialMemoryInMB": jvmData.HeapInitial, + "JVM/Heap/UsedMemoryInMB": jvmData.HeapUsed, + "JVM/Heap/CommittedMemoryInMB": jvmData.HeapCommitted, + "JVM/Heap/MaxMemoryInMB": jvmData.HeapMax, + "JVM/Native/InitialMemoryInMB": jvmData.NativeInitial, + "JVM/Native/UsedMemoryInMB": jvmData.NativeUsed, + "JVM/Native/CommittedMemoryInMB": jvmData.NativeCommitted, + "JVM/Native/MaxMemoryInMB": jvmData.NativeMax, + "JVM/ScavengerGC/CumulativeGCTimeInSeconds": jvmData.ScavengerGCTime, + "JVM/ScavengerGC/CumulativeNumberOfGCCollections": jvmData.ScavengerGCCount, + "JVM/GlobalGC/CumulativeGCTimeInSeconds": jvmData.GlobalGCTime, + "JVM/GlobalGC/CumulativeNumberOfGCCollections": jvmData.GlobalGCCount, + } + + for metricKey, metricDesc := range jvmResourceMetricNamesMap { + if metricDesc.enabled { + metric := metricData{ 
+ name: metricDesc.name, + description: metricDesc.description, + metricType: metricDesc.metricType, + metricUnits: metricDesc.metricUnits, + metricLevel: metricDesc.metricLevel, + } + metric.values = make(map[string]*Metric) + metric.values[metricKey] = &Metric{labels: prometheus.Labels{serverLabel: serverName}, value: metric.Normalise(jvmValuesMap[metricKey])} + parsedResourceMetrics.internal[metricKey] = &metric + } + } + default: + //TODO: Support other resource statistic types + } + } + + return parsedResourceMetrics, nil } // updateMetrics updates values for all available metrics func updateMetrics(log logger.LoggerInterface, mm1 *MetricsMap, mm2 *MetricsMap) { - mm1.Lock() - mm2.Lock() - defer mm1.Unlock() - defer mm2.Unlock() - - for k, md2 := range mm2.internal { - if md1, ok := mm1.internal[k]; ok { - //Iterate over the labels - for l, m2 := range md2.values { - if m1, ok := md1.values[l]; ok { - switch md1.metricType { - case Total: - md1.values[l].value = m1.value + m2.value - case Maximum: - md1.values[l].value = math.Max(m1.value, m2.value) - case Minimum: - md1.values[l].value = math.Min(m1.value, m2.value) - case Current: - md1.values[l].value = m2.value - default: - log.Printf("Should not reach here - only a set enumeration of metric types. 
%d is unknown...", md1.metricType) - } - } else { - md1.values[l] = m2 - } - } - } else { - mm1.internal[k] = mm2.internal[k] - } - } + mm1.Lock() + mm2.Lock() + defer mm1.Unlock() + defer mm2.Unlock() + + for k, md2 := range mm2.internal { + if md1, ok := mm1.internal[k]; ok { + //Iterate over the labels + for l, m2 := range md2.values { + if m1, ok := md1.values[l]; ok { + switch md1.metricType { + case Total: + md1.values[l].value = m1.value + m2.value + case Maximum: + md1.values[l].value = math.Max(m1.value, m2.value) + case Minimum: + md1.values[l].value = math.Min(m1.value, m2.value) + case Current: + md1.values[l].value = m2.value + default: + log.Printf("Should not reach here - only a set enumeration of metric types. %d is unknown...", md1.metricType) + } + } else { + md1.values[l] = m2 + } + } + } else { + mm1.internal[k] = mm2.internal[k] + } + } } diff --git a/internal/trace/trace_handler.go b/internal/trace/trace_handler.go new file mode 100644 index 0000000..ce44352 --- /dev/null +++ b/internal/trace/trace_handler.go @@ -0,0 +1,411 @@ +/* +© Copyright IBM Corporation 2021 + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package trace contains code to collect trace files +package trace + +import ( + "archive/zip" + "bytes" + "crypto/sha256" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "errors" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/ot4i/ace-docker/common/logger" +) + +var log logger.LoggerInterface + +var traceDir = "/home/aceuser/ace-server/config/common/log" +var operationalLogDir = "/home/aceuser/ace-server/log" +var credsDir = "/home/aceuser/initial-config/webusers" + +var tlsEnabled = os.Getenv("ACE_ADMIN_SERVER_SECURITY") == "true" +var caPath = os.Getenv("ACE_ADMIN_SERVER_CA") +var certFile = os.Getenv("ACE_ADMIN_SERVER_CERT") +var keyFile = os.Getenv("ACE_ADMIN_SERVER_KEY") + +var credentials []Credential + +type includeFile func(string) bool +type zipFunction func(ZipWriterInterface) error + +type ReqBody struct { + Type string +} + +type ZipWriterInterface interface { + Create(string) (io.Writer, error) + Close() error +} + +type FileReaderInterface interface { + ReadFile(string) ([]byte, error) +} + +type FileReader struct{} + +func (fr *FileReader) ReadFile(path string) ([]byte, error) { + return os.ReadFile(path) +} + +type Credential struct { + Username [32]byte + Password [32]byte +} + +type ServerInterface interface { + Start(address string, mux *http.ServeMux) + StartTLS(address string, mux *http.ServeMux, caCertPool *x509.CertPool, certPath string, keyPath string) +} + +type Server struct{} + +func (s *Server) Start(address string, mux *http.ServeMux) { + server := &http.Server{ + Addr: address, + Handler: mux, + } + go func() { + err := server.ListenAndServe() + if err != nil { + log.Println("Trace API server terminated with error " + err.Error()) + } else { + log.Println("Trace API server terminated") + } + }() +} + +func (s *Server) StartTLS(address string, mux *http.ServeMux, caCertPool *x509.CertPool, certFile string, keyFile string) { + server := &http.Server{ + Addr: address, + 
Handler: mux, + TLSConfig: &tls.Config{ + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }, + } + go func() { + err := server.ListenAndServeTLS(certFile, keyFile) + if err != nil { + log.Println("Trace API server terminated with error " + err.Error()) + } else { + log.Println("Trace API server terminated") + } + }() +} + +func StartServer(logger logger.LoggerInterface, portNumber int) error { + log = logger + + err := readBasicAuthCreds(credsDir, &FileReader{}) + if err != nil { + log.Println("Failed to read basic auth credentials. Error: " + err.Error()) + return err + } + return startTraceServer(&Server{}, portNumber) +} + +func startTraceServer(server ServerInterface, portNumber int) error { + address := ":" + strconv.Itoa(portNumber) + + mux := http.NewServeMux() + serviceTraceHandler := http.HandlerFunc(serviceTraceRouterHandler) + userTraceHandler := http.HandlerFunc(userTraceRouterHandler) + mux.Handle("/collect-service-trace", basicAuthMiddlware(serviceTraceHandler)) + mux.Handle("/collect-user-trace", basicAuthMiddlware(userTraceHandler)) + + if tlsEnabled { + caCertPool, err := getCACertPool(caPath, &FileReader{}) + if err != nil { + return err + } + server.StartTLS(address, mux, caCertPool, certFile, keyFile) + } else { + server.Start(address, mux) + } + return nil +} + +func userTraceRouterHandler(res http.ResponseWriter, req *http.Request) { + traceRouteHandler(res, req, zipUserTrace) +} + +func serviceTraceRouterHandler(res http.ResponseWriter, req *http.Request) { + traceRouteHandler(res, req, zipServiceTrace) +} + +func traceRouteHandler(res http.ResponseWriter, req *http.Request, zipFunc zipFunction) { + if req.Method != http.MethodPost { + res.WriteHeader(http.StatusMethodNotAllowed) + return + } + + res.Header().Set("Transfer-Encoding", "chunked") + res.Header().Set("Content-Disposition", "attachment; filename=\"trace.zip\"") + + zipWriter := zip.NewWriter(res) + defer zipWriter.Close() + + err := zipFunc(zipWriter) + + if 
err != nil {
+		http.Error(res, err.Error(), http.StatusInternalServerError)
+	}
+}
+
+func zipUserTrace(zipWriter ZipWriterInterface) error {
+	err := zipDir(traceDir, zipWriter, func(fileName string) bool {
+		return strings.Contains(fileName, ".userTrace.")
+	})
+	if err != nil {
+		log.Error("Failed to collect user trace. Error: " + err.Error())
+		return err
+	}
+
+	return nil
+}
+
+func zipServiceTrace(zipWriter ZipWriterInterface) error {
+	err := zipDir(traceDir, zipWriter, func(fileName string) bool {
+		return strings.Contains(fileName, ".trace.") || strings.Contains(fileName, ".exceptionLog.")
+	})
+	if err != nil {
+		log.Error("Failed to collect service trace and exception logs. Error: " + err.Error())
+		return err
+	}
+
+	err = zipDir(operationalLogDir, zipWriter, func(fileName string) bool {
+		return strings.Contains(fileName, ".designerflows.") || strings.Contains(fileName, ".designereventflows.")
+	})
+	if err != nil {
+		log.Error("Failed to collect designer operational logs. Error: " + err.Error())
+		return err
+	}
+
+	err = runOSCommand(zipWriter, "env.txt", "env")
+	if err != nil {
+		log.Error("Failed to get integration server env. Error: " + err.Error())
+		return err
+	}
+
+	err = runOSCommand(zipWriter, "ps -ewww.txt", "ps", "-ewww")
+	if err != nil {
+		log.Error("Failed to get integration server process listing (ps -ewww). Error: " + err.Error())
+		return err
+	}
+
+	return nil
+}
+
+func runOSCommand(zipWriter ZipWriterInterface, filename string, command string, arg ...string) error {
+	cmd := exec.Command(command, arg...)
+ var out bytes.Buffer + cmd.Stdout = &out + + err := cmd.Run() + if err != nil { + log.Error("Unable to run command " + command + ": " + err.Error()) + return err + } + + outBytes := out.Bytes() + + zipEntry, err := zipWriter.Create(filename) + if err != nil { + log.Error("Failed to write header for " + filename) + return err + } + + if _, err := zipEntry.Write(outBytes); err != nil { + log.Error("Failed to add " + filename + " to archive") + return err + } + + return nil +} + +func zipDir(traceDir string, zipWriter ZipWriterInterface, testFunc includeFile) error { + log.Println("Creating archive of " + traceDir) + stat, err := os.Stat(traceDir) + if err != nil { + log.Error("Directory " + traceDir + " does not exist") + return err + } + + if !stat.Mode().IsDir() { + log.Error(traceDir + " is not a directory") + return errors.New(traceDir + " is not a directory") + } + + return filepath.Walk(traceDir, func(path string, fileInfo os.FileInfo, err error) error { + if fileInfo.Mode().IsDir() { + return nil + } + if testFunc(fileInfo.Name()) { + return zipFile(path, zipWriter) + } + return nil + }) +} + +func zipFile(path string, zipWriter ZipWriterInterface) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + if fileInfo, err := file.Stat(); err == nil { + log.Println("Adding " + fileInfo.Name() + " to archive") + + zipEntry, err := zipWriter.Create(fileInfo.Name()) + if err != nil { + log.Error("Failed to write header for " + fileInfo.Name()) + return err + } + + if _, err := io.Copy(zipEntry, file); err != nil { + log.Error("Failed to add " + fileInfo.Name() + " to archive") + return err + } + } + + return nil +} + +func readBasicAuthCreds(credsDir string, fileReader FileReaderInterface) error { + stat, err := os.Stat(credsDir) + if err != nil { + return err + } + + if !stat.Mode().IsDir() { + return errors.New(credsDir + " is not a directory") + } + + return filepath.Walk(credsDir, func(path string, fileInfo os.FileInfo, 
err error) error { + if fileInfo.Mode().IsDir() { + return nil + } + + fileName := fileInfo.Name() + + if fileName == "admin-users.txt" || fileName == "operator-users.txt" { + file, err := fileReader.ReadFile(path) + if err != nil { + return err + } + + fileString := strings.TrimSpace(string(file)) + + lines := strings.Split(fileString, "\n") + for _, line := range lines { + if line != "" && !strings.HasPrefix(line, "#") { + fields := strings.Fields(line) + if len(fields) != 2 { + return errors.New("Unable to parse " + fileName) + } + // using hashes means that the length of the byte array to compare is always the same + credentials = append(credentials, Credential{ + Username: sha256.Sum256([]byte(fields[0])), + Password: sha256.Sum256([]byte(fields[1])), + }) + } + } + + log.Println("Added credentials from " + fileName + " to trace router") + } + return nil + }) +} + +func basicAuthMiddlware(next http.Handler) http.Handler { + return http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + username, password, ok := req.BasicAuth() + + if ok { + usernameHash := sha256.Sum256([]byte(username)) + passwordHash := sha256.Sum256([]byte(password)) + + for _, credential := range credentials { + // subtle.ConstantTimeCompare takes the same amount of time to run, regardless of whether the slices match or not + usernameMatch := subtle.ConstantTimeCompare(usernameHash[:], credential.Username[:]) + passwordMatch := subtle.ConstantTimeCompare(passwordHash[:], credential.Password[:]) + if usernameMatch+passwordMatch == 2 { + next.ServeHTTP(res, req) + return + } + } + } + + http.Error(res, "Unauthorized", http.StatusUnauthorized) + }) +} + +func getCACertPool(caPath string, fileReader FileReaderInterface) (*x509.CertPool, error) { + caCertPool := x509.NewCertPool() + + stat, err := os.Stat(caPath) + + if err != nil { + log.Printf("%s does not exist", caPath) + return nil, err + } + + if stat.IsDir() { + // path is a directory load all certs + log.Printf("Using CA 
Certificate folder %s", caPath) + filepath.Walk(caPath, func(cert string, info os.FileInfo, err error) error { + if strings.HasSuffix(cert, "crt.pem") { + log.Printf("Adding Certificate %s to CA pool", cert) + binaryCert, err := fileReader.ReadFile(cert) + if err != nil { + log.Printf("Error reading CA Certificate %s", err.Error()) + return nil + } + ok := caCertPool.AppendCertsFromPEM(binaryCert) + if !ok { + log.Printf("Failed to parse Certificate %s", cert) + } + } + return nil + }) + } else { + log.Printf("Using CA Certificate file %s", caPath) + caCert, err := fileReader.ReadFile(caPath) + if err != nil { + log.Errorf("Error reading CA Certificate %s", err) + return nil, err + } + ok := caCertPool.AppendCertsFromPEM(caCert) + if !ok { + log.Error("Failed to parse root CA Certificate") + return nil, errors.New("failed to parse root CA Certificate") + } + } + + return caCertPool, nil +} diff --git a/internal/trace/trace_handler_test.go b/internal/trace/trace_handler_test.go new file mode 100644 index 0000000..fe32eec --- /dev/null +++ b/internal/trace/trace_handler_test.go @@ -0,0 +1,876 @@ +/* +© Copyright IBM Corporation 2021 + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trace + +import ( + "archive/zip" + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "errors" + "io" + "math/big" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/ot4i/ace-docker/common/logger" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testLogger, _ = logger.NewLogger(os.Stdout, true, true, "test") + +func TestStartTraceServer(t *testing.T) { + log = testLogger + + t.Run("Error creating cert pool", func(t *testing.T) { + tlsEnabled = true + caPath = "capath" + err := startTraceServer(&Server{}, 7891) + require.Error(t, err) + }) + + t.Run("TLS server when ACE_ADMIN_SERVER_SECURITY is true", func(t *testing.T) { + defer func() { + os.RemoveAll("capath") + }() + tlsEnabled = true + caPath = "capath" + certFile = "certfile" + keyFile = "keyfile" + + err := os.MkdirAll("capath", 0755) + require.NoError(t, err) + + startTraceServer(&TestServer{ + t: t, + expectedAddress: ":7891", + expectedCertFile: certFile, + expectedKeyFile: keyFile, + }, 7891) + }) + + t.Run("HTTP server when ACE_ADMIN_SERVER_SECURITY is not true", func(t *testing.T) { + tlsEnabled = false + startTraceServer(&TestServer{ + t: t, + expectedAddress: ":7891", + }, 7891) + }) +} + +func TestUserTraceRouterHandler(t *testing.T) { + log = testLogger + + handler := http.HandlerFunc(userTraceRouterHandler) + url := "/collect-user-trace" + + t.Run("Sends an error if not a POST request", func(t *testing.T) { + request, _ := http.NewRequest("GET", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + assert.Equal(t, http.StatusMethodNotAllowed, response.Code) + }) + + t.Run("Sends an error if the trace can't be collected", func(t *testing.T) { + traceDir = "test/trace" + request, _ := http.NewRequest("POST", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + assert.Equal(t, 
http.StatusInternalServerError, response.Code) + }) + + t.Run("Streams a zip file for user trace", func(t *testing.T) { + defer restoreUserTrace() + setUpUserTrace(t) + + request, _ := http.NewRequest("POST", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + require.Equal(t, http.StatusOK, response.Code) + + body, _ := io.ReadAll(response.Body) + zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body))) + require.NoError(t, err) + + files := checkZip(t, zipReader) + assert.Len(t, files, 1) + assert.Contains(t, files, "test.userTrace.txt") + }) +} + +func TestServiceTraceRouteHandler(t *testing.T) { + log = testLogger + + handler := http.HandlerFunc(serviceTraceRouterHandler) + url := "/collect-service-trace" + + t.Run("Sends an error if not a POST request", func(t *testing.T) { + request, _ := http.NewRequest("GET", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + assert.Equal(t, http.StatusMethodNotAllowed, response.Code) + }) + + t.Run("Sends an error if the trace can't be collected", func(t *testing.T) { + traceDir = "test/trace" + + request, _ := http.NewRequest("POST", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + assert.Equal(t, http.StatusInternalServerError, response.Code) + }) + + t.Run("Streams a zip file for service trace", func(t *testing.T) { + defer restoreServiceTrace() + setUpServiceTrace(t) + + request, _ := http.NewRequest("POST", url, nil) + response := httptest.NewRecorder() + + handler.ServeHTTP(response, request) + require.Equal(t, http.StatusOK, response.Code) + + body, _ := io.ReadAll(response.Body) + zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body))) + require.NoError(t, err) + + files := checkZip(t, zipReader) + assert.Len(t, files, 6) + assert.Contains(t, files, "test.trace.txt") + assert.Contains(t, files, "test.exceptionLog.txt") + assert.Contains(t, files, 
"test.designerflows.txt") + assert.Contains(t, files, "test.designereventflows.txt") + assert.Contains(t, files, "env.txt") + assert.Contains(t, files, "ps -ewww.txt") + }) +} + +func TestZipUserTrace(t *testing.T) { + log = testLogger + + defer restoreUserTrace() + setUpUserTrace(t) + + t.Run("Builds a zip with user trace", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + + err := zipUserTrace(zipWriter) + require.NoError(t, err) + + zipWriter.Close() + + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + filesInZip := checkZip(t, zipReader) + assert.Len(t, filesInZip, 1) + assert.Contains(t, filesInZip, "test.userTrace.txt") + }) + + t.Run("Returns an error when it can't collect user trace", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := FailOnFileNameZipWriter{ + failOnFileName: "test.userTrace.txt", + zipWriter: zip.NewWriter(&buffer), + } + err := zipUserTrace(zipWriter) + assert.EqualError(t, err, "Unable to write test.userTrace.txt") + }) +} + +func TestZipServiceTrace(t *testing.T) { + log = testLogger + + defer restoreServiceTrace() + setUpServiceTrace(t) + + t.Run("Builds a zip with service trace, exception logs, designer operational logs, env, and ps -ewww output", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + + err := zipServiceTrace(zipWriter) + require.NoError(t, err) + + zipWriter.Close() + + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + files := checkZip(t, zipReader) + assert.Len(t, files, 6) + assert.Contains(t, files, "test.trace.txt") + assert.Contains(t, files, "test.exceptionLog.txt") + assert.Contains(t, files, "test.designerflows.txt") + assert.Contains(t, files, "test.designereventflows.txt") + assert.Contains(t, files, "env.txt") + assert.Contains(t, files, "ps -ewww.txt") + }) + + t.Run("Failure test 
cases", func(t *testing.T) { + failureTestCases := []string{ + "test.trace.txt", + "test.designerflows.txt", + "env.txt", + "ps -ewww.txt", + } + + for _, fileName := range failureTestCases { + t.Run(fileName, func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := FailOnFileNameZipWriter{ + failOnFileName: fileName, + zipWriter: zip.NewWriter(&buffer), + } + err := zipServiceTrace(zipWriter) + assert.EqualError(t, err, "Unable to write "+fileName) + }) + } + }) +} + +func TestRunOSCommand(t *testing.T) { + log = testLogger + + t.Run("Returns an error if it can't run the command", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := runOSCommand(zipWriter, "file.txt", "asdasdd") + assert.Error(t, err) + zipWriter.Close() + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + assert.Len(t, checkZip(t, zipReader), 0) + }) + + t.Run("Returns an error if there is an error when creating a file in the zip", func(t *testing.T) { + zipWriter := CreateFailureZipWriter{} + err := runOSCommand(zipWriter, "file.txt", "echo", "hello world") + assert.Error(t, err) + }) + + t.Run("Returns an error if the command output can't be written to the zip", func(t *testing.T) { + zipWriter := WriteFailureZipWriter{} + err := runOSCommand(zipWriter, "file.txt", "echo", "hello world") + assert.Error(t, err) + }) + + t.Run("Adds the command output to the zip if successful", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + + err := runOSCommand(zipWriter, "file.txt", "echo", "hello world") + assert.NoError(t, err) + + zipWriter.Close() + + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + + files := checkZip(t, zipReader) + assert.Len(t, files, 1) + assert.Contains(t, files, "file.txt") + }) +} + +func TestZipDir(t *testing.T) { + log = testLogger + + // Create directories 
and files which will be archived + err := os.MkdirAll("subdir/parent/child", 0755) + require.NoError(t, err) + defer os.RemoveAll("subdir") + + files := []string{"subdir/parent/file1.txt", "subdir/parent/child/file2.txt", "subdir/parent/file3.txt"} + + for _, fileName := range files { + file, err := os.Create(fileName) + require.NoError(t, err) + _, err = file.WriteString("This is a test") + require.NoError(t, err) + } + + t.Run("Calls zipFile for each file and only adds files which pass the test function ", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := zipDir("subdir", zipWriter, func(fileName string) bool { + return !strings.Contains(fileName, "1") + }) + zipWriter.Close() + require.NoError(t, err) + + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + + files := checkZip(t, zipReader) + assert.Len(t, files, 2) + assert.Contains(t, files, "file2.txt") + assert.Contains(t, files, "file3.txt") + }) + + t.Run("Returns an error if the directory does not exist", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := zipDir("does-not-exist", zipWriter, func(string) bool { return true }) + zipWriter.Close() + assert.Error(t, err) + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + assert.Len(t, checkZip(t, zipReader), 0) + }) + + t.Run("Returns an error if passed a file that is not a directory", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := zipDir("subdir/parent/file1.txt", zipWriter, func(string) bool { return true }) + zipWriter.Close() + assert.EqualError(t, err, "subdir/parent/file1.txt is not a directory") + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + assert.Len(t, checkZip(t, zipReader), 0) + }) + + t.Run("Creates an 
empty zip if there are no files which pass the test function", func(t *testing.T) { + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := zipDir("subdir", zipWriter, func(fileName string) bool { + return !strings.Contains(fileName, "file") + }) + zipWriter.Close() + assert.NoError(t, err) + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + assert.Len(t, checkZip(t, zipReader), 0) + }) +} + +func TestZipFile(t *testing.T) { + log = testLogger + fileNameToZip := "fileToZip.txt" + + testSetup := func() { + // Create file which will be archived + fileToZip, err := os.Create(fileNameToZip) + require.NoError(t, err) + _, err = fileToZip.WriteString("This is a test") + require.NoError(t, err) + } + + t.Run("Returns an error when the file cannot be opened", func(t *testing.T) { + err := zipFile("badPath", nil) + assert.Error(t, err) + }) + + t.Run("Returns an error when it fails to create the file in the zip", func(t *testing.T) { + defer os.Remove(fileNameToZip) + testSetup() + + zipWriter := CreateFailureZipWriter{} + + err := zipFile(fileNameToZip, zipWriter) + assert.EqualError(t, err, "Failed to create") + }) + + t.Run("Returns an error when it fails to add the file to the zip", func(t *testing.T) { + defer os.Remove(fileNameToZip) + testSetup() + + zipWriter := WriteFailureZipWriter{} + + err := zipFile(fileNameToZip, zipWriter) + assert.EqualError(t, err, "Failed to write") + }) + + t.Run("Returns with no error when the header and file are successfully written to the zip", func(t *testing.T) { + defer os.Remove(fileNameToZip) + testSetup() + + var buffer bytes.Buffer + zipWriter := zip.NewWriter(&buffer) + err := zipFile("fileToZip.txt", zipWriter) + assert.NoError(t, err) + zipWriter.Close() + + zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes()))) + require.NoError(t, err) + files := checkZip(t, zipReader) + assert.Equal(t, 1, 
len(files)) + assert.Contains(t, files, "fileToZip.txt") + }) +} + +func TestReadBasicAuthCreds(t *testing.T) { + credsDir := "credentials" + + setupDir := func() { + err := os.MkdirAll(credsDir, 0755) + require.NoError(t, err) + } + + cleanUpDir := func() { + err := os.RemoveAll(credsDir) + require.NoError(t, err) + } + + t.Run("Returns an error if the directory does not exist", func(t *testing.T) { + err := readBasicAuthCreds("does/not/exist", &FileReader{}) + require.Error(t, err) + }) + + t.Run("Returns an error if the input parameter is not a directory", func(t *testing.T) { + setupDir() + defer cleanUpDir() + + _, err := os.Create(credsDir + "/emptyFile.txt") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir+"/emptyFile.txt", &FileReader{}) + require.EqualError(t, err, "credentials/emptyFile.txt is not a directory") + }) + + t.Run("Returns an error if there is an error reading a file", func(t *testing.T) { + setupDir() + defer cleanUpDir() + + _, err := os.Create(credsDir + "/admin-users.txt") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir, &ErrorFileReader{}) + require.EqualError(t, err, "Unable to read file") + }) + + t.Run("Returns an error if it fails to parse the credentials file", func(t *testing.T) { + setupDir() + defer cleanUpDir() + + file, err := os.Create(credsDir + "/admin-users.txt") + require.NoError(t, err) + _, err = file.WriteString("This is a test") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir, &FileReader{}) + require.EqualError(t, err, "Unable to parse admin-users.txt") + }) + + t.Run("Returns nil if the credentials have all been read and parsed - single line files", func(t *testing.T) { + log = testLogger + + setupDir() + defer cleanUpDir() + + credentials = []Credential{} + + file, err := os.Create(credsDir + "/admin-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user1 pass1") + require.NoError(t, err) + + file, err = os.Create(credsDir + "/operator-users.txt") + 
require.NoError(t, err) + _, err = file.WriteString("user2 pass2") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir, &FileReader{}) + require.NoError(t, err) + + require.Len(t, credentials, 2) + assert.Equal(t, sha256.Sum256([]byte("user1")), credentials[0].Username) + assert.Equal(t, sha256.Sum256([]byte("pass1")), credentials[0].Password) + assert.Equal(t, sha256.Sum256([]byte("user2")), credentials[1].Username) + assert.Equal(t, sha256.Sum256([]byte("pass2")), credentials[1].Password) + }) + + t.Run("Returns nil if the credentials have all been read and parsed - multi line files, trailing spaces and comments", func(t *testing.T) { + log = testLogger + + setupDir() + defer cleanUpDir() + + credentials = []Credential{} + + file, err := os.Create(credsDir + "/admin-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user1 pass1\nuser2 pass2\n") + require.NoError(t, err) + + file, err = os.Create(credsDir + "/operator-users.txt") + require.NoError(t, err) + _, err = file.WriteString("# this shouldn't cause an error \n# nor should this\nuser3 pass3 \n ") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir, &FileReader{}) + require.NoError(t, err) + + require.Len(t, credentials, 3) + assert.Equal(t, sha256.Sum256([]byte("user1")), credentials[0].Username) + assert.Equal(t, sha256.Sum256([]byte("pass1")), credentials[0].Password) + assert.Equal(t, sha256.Sum256([]byte("user2")), credentials[1].Username) + assert.Equal(t, sha256.Sum256([]byte("pass2")), credentials[1].Password) + assert.Equal(t, sha256.Sum256([]byte("user3")), credentials[2].Username) + assert.Equal(t, sha256.Sum256([]byte("pass3")), credentials[2].Password) + }) + + t.Run("does not add viewer, auditor or editor users", func(t *testing.T) { + log = testLogger + + setupDir() + defer cleanUpDir() + + credentials = []Credential{} + + file, err := os.Create(credsDir + "/auditor-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user1 pass1") + 
require.NoError(t, err) + + file, err = os.Create(credsDir + "/operator-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user2 pass2") + require.NoError(t, err) + + file, err = os.Create(credsDir + "/editor-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user3 pass3") + require.NoError(t, err) + + file, err = os.Create(credsDir + "/viewer-users.txt") + require.NoError(t, err) + _, err = file.WriteString("user4 pass4") + require.NoError(t, err) + + err = readBasicAuthCreds(credsDir, &FileReader{}) + require.NoError(t, err) + + require.Len(t, credentials, 1) + assert.Equal(t, sha256.Sum256([]byte("user2")), credentials[0].Username) + assert.Equal(t, sha256.Sum256([]byte("pass2")), credentials[0].Password) + }) +} + +func TestBasicAuthMiddlware(t *testing.T) { + setCredentials := func() { + credentials = []Credential{{ + Username: sha256.Sum256([]byte("user1")), + Password: sha256.Sum256([]byte("pass1")), + }, { + Username: sha256.Sum256([]byte("user2")), + Password: sha256.Sum256([]byte("pass2")), + }, + } + } + + t.Run("No credentials defined", func(t *testing.T) { + credentials = []Credential{} + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + + handlerToTest := basicAuthMiddlware(nil) + handlerToTest.ServeHTTP(response, req) + + assert.Equal(t, 401, response.Result().StatusCode) + }) + + t.Run("No basic auth credentials in request", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + + handlerToTest := basicAuthMiddlware(nil) + handlerToTest.ServeHTTP(response, req) + + assert.Equal(t, 401, response.Result().StatusCode) + }) + + t.Run("Invalid basic auth credentials", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + req.SetBasicAuth("invaliduser", "invalidpass") + + handlerToTest := basicAuthMiddlware(nil) + 
handlerToTest.ServeHTTP(response, req) + + assert.Equal(t, 401, response.Result().StatusCode) + }) + + t.Run("Matching username", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + req.SetBasicAuth("user1", "invalidpass") + + handlerToTest := basicAuthMiddlware(nil) + handlerToTest.ServeHTTP(response, req) + + assert.Equal(t, 401, response.Result().StatusCode) + }) + + t.Run("Matching password", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + req.SetBasicAuth("invaliduser", "pass1") + + handlerToTest := basicAuthMiddlware(nil) + handlerToTest.ServeHTTP(response, req) + + assert.Equal(t, 401, response.Result().StatusCode) + }) + + t.Run("Matches first credentials", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + req.SetBasicAuth("user1", "pass1") + + var called bool + + handlerToTest := basicAuthMiddlware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + })) + handlerToTest.ServeHTTP(response, req) + + assert.True(t, called) + }) + + t.Run("Matches second credentials", func(t *testing.T) { + setCredentials() + response := httptest.NewRecorder() + req := httptest.NewRequest("GET", "http://testing", nil) + req.SetBasicAuth("user2", "pass2") + + var called bool + + handlerToTest := basicAuthMiddlware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = true + })) + handlerToTest.ServeHTTP(response, req) + + assert.True(t, called) + }) +} +func TestGetCACertPool(t *testing.T) { + log = testLogger + + defer func() { + os.RemoveAll("cert-dir") + }() + + err := os.MkdirAll("cert-dir", 0755) + require.NoError(t, err) + + _, err = os.Create("cert-dir/empty-crt.pem") // empty file for parsing errors + require.NoError(t, err) + + file, err := 
os.Create("cert-dir/valid-crt.pem") + require.NoError(t, err) + + cert := createValidCA() + _, err = file.Write(cert) + require.NoError(t, err) + + t.Run("Returns an error if caPath is not a file", func(t *testing.T) { + _, err := getCACertPool("does-not-exist", &FileReader{}) + require.Error(t, err) + }) + + t.Run("Returns an error if the ca file cannot be read", func(t *testing.T) { + _, err := getCACertPool("cert-dir/valid-crt.pem", &ErrorFileReader{}) + require.Error(t, err) + }) + + t.Run("Returns an error if the ca file cannot be parsed", func(t *testing.T) { + _, err := getCACertPool("cert-dir/empty-crt.pem", &FileReader{}) + require.Error(t, err) + }) + + t.Run("Returns a caPool with one cert if the ca file is added successfully", func(t *testing.T) { + caCertPool, err := getCACertPool("cert-dir/valid-crt.pem", &FileReader{}) + require.NoError(t, err) + assert.Len(t, caCertPool.Subjects(), 1) + }) + + t.Run("Does not return an error when a file reading error occurs in a directory", func(t *testing.T) { + caCertPool, err := getCACertPool("cert-dir", &ErrorFileReader{}) + require.NoError(t, err) + assert.Len(t, caCertPool.Subjects(), 0) // both files won't be read + }) + + t.Run("Does not return an error when files can't be parsed", func(t *testing.T) { + caCertPool, err := getCACertPool("cert-dir", &FileReader{}) + require.NoError(t, err) + assert.Len(t, caCertPool.Subjects(), 1) // the empty file will fail to parse + }) +} + +func checkZip(t *testing.T, zipReader *zip.Reader) []string { + var files []string + for _, f := range zipReader.File { + files = append(files, f.FileHeader.Name) + } + return files +} + +func setUpUserTrace(t *testing.T) { + traceDir = "test/trace" + + err := os.MkdirAll(traceDir, 0755) + require.NoError(t, err) + + files := []string{ + traceDir + "/test.userTrace.txt", + traceDir + "/no-match.txt", + } + + for _, fileName := range files { + file, err := os.Create(fileName) + require.NoError(t, err) + _, err = file.WriteString("This 
is a test") + require.NoError(t, err) + } +} + +func restoreUserTrace() { + os.RemoveAll("test") +} + +func setUpServiceTrace(t *testing.T) { + traceDir = "test/trace" + operationalLogDir = "test/log" + + directories := []string{ + traceDir, + operationalLogDir, + } + + for _, dir := range directories { + err := os.MkdirAll(dir, 0755) + require.NoError(t, err) + } + + files := []string{ + traceDir + "/test.trace.txt", + traceDir + "/test.exceptionLog.txt", + traceDir + "/no-match.txt", + operationalLogDir + "/test.designerflows.txt", + operationalLogDir + "/test.designereventflows.txt", + operationalLogDir + "/no-match.txt", + } + + for _, fileName := range files { + file, err := os.Create(fileName) + require.NoError(t, err) + _, err = file.WriteString("This is a test") + require.NoError(t, err) + } +} + +func restoreServiceTrace() { + os.RemoveAll("test") +} + +func createValidCA() []byte { + privKey, _ := rsa.GenerateKey(rand.Reader, 4096) + ca := &x509.Certificate{ + SerialNumber: &big.Int{}, + IsCA: true, + } + + caBytes, _ := x509.CreateCertificate(rand.Reader, ca, ca, &privKey.PublicKey, privKey) + + caPEM := new(bytes.Buffer) + _ = pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + }) + + return caPEM.Bytes() +} + +type CreateFailureZipWriter struct{} + +func (zw CreateFailureZipWriter) Create(filename string) (io.Writer, error) { + return nil, errors.New("Failed to create") +} +func (zw CreateFailureZipWriter) Close() error { + return nil +} + +type WriteFailureZipWriter struct{} + +func (zw WriteFailureZipWriter) Create(filename string) (io.Writer, error) { + zipEntry := WriteFailureZipEntry{} + return zipEntry, nil +} +func (zw WriteFailureZipWriter) Close() error { + return nil +} + +type WriteFailureZipEntry struct{} + +func (ze WriteFailureZipEntry) Write(p []byte) (n int, err error) { + return 0, errors.New("Failed to write") +} + +type FailOnFileNameZipWriter struct { + failOnFileName string + zipWriter ZipWriterInterface +} + 
+func (zw FailOnFileNameZipWriter) Create(filename string) (io.Writer, error) { + if filename == zw.failOnFileName { + return nil, errors.New("Unable to write " + zw.failOnFileName) + } + return zw.zipWriter.Create(filename) +} +func (zw FailOnFileNameZipWriter) Close() error { + return zw.zipWriter.Close() +} + +type ErrorFileReader struct{} + +func (fr *ErrorFileReader) ReadFile(string) ([]byte, error) { + return nil, errors.New("Unable to read file") +} + +type TestServer struct { + t *testing.T + expectedAddress string + expectedCertFile string + expectedKeyFile string +} + +func (s *TestServer) Start(address string, mux *http.ServeMux) { + assert.Equal(s.t, s.expectedAddress, address) +} + +func (s *TestServer) StartTLS(address string, mux *http.ServeMux, caCertPool *x509.CertPool, certFile string, keyFile string) { + assert.Equal(s.t, s.expectedAddress, address) + assert.Equal(s.t, s.expectedCertFile, certFile) + assert.Equal(s.t, s.expectedKeyFile, keyFile) +} diff --git a/internal/webadmin/testdata/initial-config/webusers/admin-users.txt b/internal/webadmin/testdata/initial-config/webusers/admin-users.txt new file mode 100644 index 0000000..bc07495 --- /dev/null +++ b/internal/webadmin/testdata/initial-config/webusers/admin-users.txt @@ -0,0 +1 @@ +ibm-ace-dashboard-admin 1758F07A-8BEF-448C-B020-C25946AF3E94 \ No newline at end of file diff --git a/internal/webadmin/testdata/initial-config/webusers/audit-users.txt b/internal/webadmin/testdata/initial-config/webusers/audit-users.txt new file mode 100644 index 0000000..2926f91 --- /dev/null +++ b/internal/webadmin/testdata/initial-config/webusers/audit-users.txt @@ -0,0 +1 @@ +ibm-ace-dashboard-audit 929064C2-0017-4B34-A883-219A4D1AC944 \ No newline at end of file diff --git a/internal/webadmin/testdata/initial-config/webusers/editor-users.txt b/internal/webadmin/testdata/initial-config/webusers/editor-users.txt new file mode 100644 index 0000000..d4073aa --- /dev/null +++ 
b/internal/webadmin/testdata/initial-config/webusers/editor-users.txt @@ -0,0 +1 @@ +ibm-ace-dashboard-editor 28DBC34B-C0FD-44BF-8100-99DB686B6DB2 \ No newline at end of file diff --git a/internal/webadmin/testdata/initial-config/webusers/operator-users.txt b/internal/webadmin/testdata/initial-config/webusers/operator-users.txt new file mode 100644 index 0000000..20c5008 --- /dev/null +++ b/internal/webadmin/testdata/initial-config/webusers/operator-users.txt @@ -0,0 +1 @@ +ibm-ace-dashboard-operator 68FE7808-8EC2-4395-97D0-A776D2A61912 \ No newline at end of file diff --git a/internal/webadmin/testdata/initial-config/webusers/server.conf.yaml b/internal/webadmin/testdata/initial-config/webusers/server.conf.yaml new file mode 100644 index 0000000..b2b8775 --- /dev/null +++ b/internal/webadmin/testdata/initial-config/webusers/server.conf.yaml @@ -0,0 +1,4 @@ +RestAdminListener: + authorizationEnabled: true + authorizationMode: file + basicAuth: true \ No newline at end of file diff --git a/internal/webadmin/testdata/initial-config/webusers/viewer-users.txt b/internal/webadmin/testdata/initial-config/webusers/viewer-users.txt new file mode 100644 index 0000000..e93014e --- /dev/null +++ b/internal/webadmin/testdata/initial-config/webusers/viewer-users.txt @@ -0,0 +1 @@ +ibm-ace-dashboard-viewer EF086556-74B8-4FB0-ACF8-CC59E1F3DB5F \ No newline at end of file diff --git a/internal/webadmin/webadmin.go b/internal/webadmin/webadmin.go new file mode 100644 index 0000000..0d810db --- /dev/null +++ b/internal/webadmin/webadmin.go @@ -0,0 +1,222 @@ +package webadmin + +import ( + "crypto/rand" + "crypto/sha512" + b64 "encoding/base64" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/ot4i/ace-docker/common/logger" + "golang.org/x/crypto/pbkdf2" + "gopkg.in/yaml.v2" +) + +var ( + readFile = os.ReadFile + mkdirAll = os.MkdirAll + writeFile = os.WriteFile + readDir = os.ReadDir + processWebAdminUsers = processWebAdminUsersLocal + applyFileAuthOverrides = 
applyFileAuthOverridesLocal + outputFiles = outputFilesLocal + unmarshal = yaml.Unmarshal + marshal = yaml.Marshal + readWebUsersTxt = readWebUsersTxtLocal + version string = "12.0.0.0" + homedir string = "/home/aceuser/" + webusersDir string = "/home/aceuser/initial-config/webusers/" +) + +func ConfigureWebAdminUsers(log logger.LoggerInterface) error { + serverConfContent, err := readServerConfFile() + if err != nil { + log.Errorf("Error reading server.conf.yaml: %v", err) + return err + } + + webAdminUserInfo, err := processWebAdminUsers(log, webusersDir) + if err != nil { + log.Errorf("Error processing WebAdmin users: %v", err) + return err + } + serverconfYaml, err := applyFileAuthOverrides(log, webAdminUserInfo, serverConfContent) + if err != nil { + log.Errorf("Error applying file auth overrides: %v", err) + return err + } + err = writeServerConfFile(serverconfYaml) + if err != nil { + log.Errorf("Error writing server.conf.yaml: %v", err) + return err + } + + for webAdminUserName, webAdminPass := range webAdminUserInfo { + m := map[string]string{ + "password": keyGen(webAdminPass), + "role": webAdminUserName, + "version": version, + } + err := outputFiles(log, m) + if err != nil { + log.Errorf("Error writing WebAdmin files: %v", err) + return err + } + } + return nil +} + +func processWebAdminUsersLocal(log logger.LoggerInterface, dir string) (map[string]string, error) { + userInfo := map[string]string{} + + fileList, err := readDir(dir) + if err != nil { + log.Errorf("Error reading directory: %v", err) + return nil, err + } + + for _, file := range fileList { + if filepath.Ext(file.Name()) == ".txt" { + username, password, err := readWebUsersTxt(log, dir+file.Name()) + if err != nil { + log.Errorf("Error reading WebAdmin users.txt file: %v", err) + return nil, err + } + userInfo[username] = password + } + } + return userInfo, nil +} + +func applyFileAuthOverridesLocal(log logger.LoggerInterface, webAdminUserInfo map[string]string, serverconfContent []byte) 
([]byte, error) { + serverconfMap := make(map[interface{}]interface{}) + err := unmarshal([]byte(serverconfContent), &serverconfMap) + if err != nil { + log.Errorf("Error unmarshalling server.conf.yaml content: %v", err) + return nil, err + } + + permissionsMap := map[string]string{ + "admin": "read+:write+:execute+", + "operator": "read+:write-:execute+", + "editor": "read+:write+:execute-", + "audit": "read+:write-:execute-", + "viewer": "read+:write-:execute-", + } + + if serverconfMap["Security"] == nil { + serverconfMap["Security"] = map[interface{}]interface{}{} + security := serverconfMap["Security"].(map[interface{}]interface{}) + if security["DataPermissions"] == nil && security["Permissions"] == nil { + security["DataPermissions"] = map[interface{}]interface{}{} + dataPermissions := security["DataPermissions"].(map[interface{}]interface{}) + security["Permissions"] = map[interface{}]interface{}{} + permissions := security["Permissions"].(map[interface{}]interface{}) + if _, ok := webAdminUserInfo["ibm-ace-dashboard-admin"]; ok { + dataPermissions["admin"] = permissionsMap["admin"] + permissions["admin"] = permissionsMap["admin"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-operator"]; ok { + permissions["operator"] = permissionsMap["operator"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-editor"]; ok { + permissions["editor"] = permissionsMap["editor"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-audit"]; ok { + permissions["audit"] = permissionsMap["audit"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-viewer"]; ok { + permissions["viewer"] = permissionsMap["viewer"] + } + } + } else { + security := serverconfMap["Security"].(map[interface{}]interface{}) + if security["DataPermissions"] == nil && security["Permissions"] == nil { + security["DataPermissions"] = map[interface{}]interface{}{} + dataPermissions := security["DataPermissions"].(map[interface{}]interface{}) + security["Permissions"] = 
map[interface{}]interface{}{} + permissions := security["Permissions"].(map[interface{}]interface{}) + if _, ok := webAdminUserInfo["ibm-ace-dashboard-admin"]; ok { + dataPermissions["admin"] = permissionsMap["admin"] + permissions["admin"] = permissionsMap["admin"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-operator"]; ok { + permissions["operator"] = permissionsMap["operator"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-editor"]; ok { + permissions["editor"] = permissionsMap["editor"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-audit"]; ok { + permissions["audit"] = permissionsMap["audit"] + } + if _, ok := webAdminUserInfo["ibm-ace-dashboard-viewer"]; ok { + permissions["viewer"] = permissionsMap["viewer"] + } + } + } + + serverconfYaml, err := marshal(&serverconfMap) + if err != nil { + log.Errorf("Error marshalling server.conf.yaml overrides: %v", err) + return nil, err + } + + return serverconfYaml, nil + +} + +func keyGen(password string) string { + salt := make([]byte, 16) + rand.Read(salt) + dk := pbkdf2.Key([]byte(password), salt, 65536, 64, sha512.New) + return fmt.Sprintf("PBKDF2-SHA-512:%s:%s", b64EncodeString(salt), b64EncodeString(dk)) +} + +func readServerConfFile() ([]byte, error) { + return readFile(homedir + "ace-server/overrides/server.conf.yaml") + +} + +func writeServerConfFile(content []byte) error { + return writeFile(homedir+"ace-server/overrides/server.conf.yaml", content, 0644) +} + +func outputFilesLocal(log logger.LoggerInterface, files map[string]string) error { + dir := homedir + "ace-server/config/registry/integration_server/CurrentVersion/WebAdmin/user/" + webadminDir := dir + files["role"] + err := mkdirAll(webadminDir, 0755) + if err != nil { + log.Errorf("Error creating directories: %v", err) + return err + } + + for fileName, fileContent := range files { + // The 'role' is populated from the users.txt files in initial-config e.g. 
admin-users.txt we need to trim this to the actual role which would be 'admin' + fileContent = strings.TrimPrefix(fileContent, "ibm-ace-dashboard-") + err := writeFile(webadminDir+"/"+fileName, []byte(fileContent), 0660) + if err != nil { + log.Errorf("Error writing files: %v %s", err, fileName) + return err + } + + } + + return nil +} + +func b64EncodeString(data []byte) string { + return b64.StdEncoding.EncodeToString(data) +} + +func readWebUsersTxtLocal(log logger.LoggerInterface, filename string) (string, string, error) { + out, err := readFile(filename) + if err != nil { + log.Errorf("Error reading WebAdmin users.txt file: %v", err) + return "", "", err + } + + credentials := strings.Fields(string(out)) + return credentials[0], credentials[1], nil +} diff --git a/internal/webadmin/webadmin_test.go b/internal/webadmin/webadmin_test.go new file mode 100644 index 0000000..b95cc5b --- /dev/null +++ b/internal/webadmin/webadmin_test.go @@ -0,0 +1,490 @@ +package webadmin + +import ( + b64 "encoding/base64" + "errors" + "os" + "strings" + "testing" + + "github.com/ot4i/ace-docker/common/logger" + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" +) + +// Allows us to read server.conf.yaml out into a struct which makes it easier check the values +type ServerConf struct { + RestAdminListener struct { + AuthorizationEnabled bool `yaml:"authorizationEnabled"` + } `yaml:"RestAdminListener"` + Security struct { + LdapAuthorizeAttributeToRoleMap struct { + RandomField string `yaml:"randomfield"` + } `yaml:"LdapAuthorizeAttributeToRoleMap"` + DataPermissions struct { + Admin string `yaml:"admin"` + } `yaml:"DataPermissions"` + Permissions struct { + Admin string `yaml:"admin"` + Audit string `yaml:"audit"` + Editor string `yaml:"editor"` + Operator string `yaml:"operator"` + Viewer string `yaml:"viewer"` + } `yaml:"Permissions"` + } `yaml:"Security"` +} +type ServerConfWithoutSecurity struct { + RestAdminListener struct { + AuthorizationEnabled bool 
`yaml:"authorizationEnabled"` + } `yaml:"RestAdminListener"` +} + +type ServerConfNoPermissions struct { + RestAdminListener struct { + AuthorizationEnabled bool `yaml:"authorizationEnabled"` + } `yaml:"RestAdminListener"` + Security struct { + LdapAuthorizeAttributeToRoleMap struct { + RandomField string `yaml:"randomfield"` + } `yaml:"LdapAuthorizeAttributeToRoleMap"` + } `yaml:"Security"` +} + +func Test_ConfigureWebAdminUsers(t *testing.T) { + log, _ := logger.NewLogger(os.Stdout, true, false, "testloger") + oldReadFile := readFile + oldProcessWebAdminUsers := processWebAdminUsers + oldApplyFileAuthOverrides := applyFileAuthOverrides + oldWriteFile := writeFile + oldOutputFiles := outputFiles + t.Run("Golden path - all functions are free from errors and ConfigureWebAdminUsers returns nil", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, nil + } + + processWebAdminUsers = func(logger.LoggerInterface, string) (map[string]string, error) { + usersMap := map[string]string{ + "ibm-ace-dashboard-admin": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + } + + return usersMap, nil + } + + applyFileAuthOverrides = func(log logger.LoggerInterface, webAdminUserInfo map[string]string, serverconfContent []byte) ([]byte, error) { + + return nil, nil + } + writeFile = func(name string, data []byte, perm os.FileMode) error { + return nil + } + outputFiles = func(logger.LoggerInterface, map[string]string) error { + return nil + } + err := ConfigureWebAdminUsers(log) + assert.NoError(t, err) + readFile = oldReadFile + processWebAdminUsers = oldProcessWebAdminUsers + applyFileAuthOverrides = oldApplyFileAuthOverrides + writeFile = oldWriteFile + outputFiles = oldOutputFiles + + }) + + t.Run("readServerConfFile returns an error unable to read server.conf.yaml file", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, errors.New("Unable to read server.conf.yaml") + } + err := ConfigureWebAdminUsers(log) + assert.Error(t, 
err) + assert.Equal(t, "Unable to read server.conf.yaml", err.Error()) + readFile = oldReadFile + + }) + t.Run("processAdminUsers returns an error unable to process webadmin users", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, nil + } + + processWebAdminUsers = func(log logger.LoggerInterface, dir string) (map[string]string, error) { + return nil, errors.New("Unable to process web admin users") + } + + err := ConfigureWebAdminUsers(log) + assert.Error(t, err) + assert.Equal(t, "Unable to process web admin users", err.Error()) + readFile = oldReadFile + processWebAdminUsers = oldProcessWebAdminUsers + + }) + + t.Run("applyFileAuthOverrides returns an error unable to apply file auth overrides", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, nil + } + + processWebAdminUsers = func(log logger.LoggerInterface, dir string) (map[string]string, error) { + return nil, nil + } + + applyFileAuthOverrides = func(log logger.LoggerInterface, webAdminUserInfo map[string]string, serverconfContent []byte) ([]byte, error) { + return nil, errors.New("Unable to apply file auth overrides") + } + + err := ConfigureWebAdminUsers(log) + assert.Error(t, err) + assert.Equal(t, "Unable to apply file auth overrides", err.Error()) + readFile = oldReadFile + processWebAdminUsers = oldProcessWebAdminUsers + applyFileAuthOverrides = oldApplyFileAuthOverrides + + }) + + t.Run("writeServerConfFile error writing server.conf.yaml overrides back into the file", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, nil + } + + processWebAdminUsers = func(log logger.LoggerInterface, dir string) (map[string]string, error) { + return nil, nil + } + + applyFileAuthOverrides = func(log logger.LoggerInterface, webAdminUserInfo map[string]string, serverconfContent []byte) ([]byte, error) { + return nil, nil + } + writeFile = func(name string, data []byte, perm os.FileMode) error { + return 
errors.New("Error writing server.conf.yaml back after overrides") + } + err := ConfigureWebAdminUsers(log) + assert.Error(t, err) + assert.Equal(t, "Error writing server.conf.yaml back after overrides", err.Error()) + readFile = oldReadFile + processWebAdminUsers = oldProcessWebAdminUsers + applyFileAuthOverrides = oldApplyFileAuthOverrides + writeFile = oldWriteFile + + }) + + t.Run("writeServerConfFile error writing server.conf.yaml overrides back into the file", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return nil, nil + } + + processWebAdminUsers = func(logger.LoggerInterface, string) (map[string]string, error) { + usersMap := map[string]string{ + "ibm-ace-dashboard-admin": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + } + + return usersMap, nil + } + + applyFileAuthOverrides = func(log logger.LoggerInterface, webAdminUserInfo map[string]string, serverconfContent []byte) ([]byte, error) { + + return nil, nil + } + writeFile = func(name string, data []byte, perm os.FileMode) error { + return nil + } + outputFiles = func(logger.LoggerInterface, map[string]string) error { + return errors.New("Error outputting files during password generation") + } + err := ConfigureWebAdminUsers(log) + assert.Error(t, err) + assert.Equal(t, "Error outputting files during password generation", err.Error()) + readFile = oldReadFile + processWebAdminUsers = oldProcessWebAdminUsers + applyFileAuthOverrides = oldApplyFileAuthOverrides + writeFile = oldWriteFile + outputFiles = oldOutputFiles + }) +} + +func Test_processWebAdminUsers(t *testing.T) { + log, _ := logger.NewLogger(os.Stdout, true, false, "testloger") + t.Run("readDir returns error reading directory", func(t *testing.T) { + oldReadDir := readDir + readDir = func(name string) ([]os.DirEntry, error) { + return nil, errors.New("Error reading directory") + } + _, err := processWebAdminUsers(log, "dir") + assert.Error(t, err) + assert.Equal(t, "Error reading directory", err.Error()) + + readDir = 
oldReadDir + }) + t.Run("Golden path - processWebAdminUsers loops over fileList and falls readWebUsersTxt for each .txt file ", func(t *testing.T) { + webAdminUsers, err := processWebAdminUsers(log, "testdata/initial-config/webusers/") + assert.NoError(t, err) + assert.Equal(t, "1758F07A-8BEF-448C-B020-C25946AF3E94", webAdminUsers["ibm-ace-dashboard-admin"]) + assert.Equal(t, "68FE7808-8EC2-4395-97D0-A776D2A61912", webAdminUsers["ibm-ace-dashboard-operator"]) + assert.Equal(t, "28DBC34B-C0FD-44BF-8100-99DB686B6DB2", webAdminUsers["ibm-ace-dashboard-editor"]) + assert.Equal(t, "929064C2-0017-4B34-A883-219A4D1AC944", webAdminUsers["ibm-ace-dashboard-audit"]) + assert.Equal(t, "EF086556-74B8-4FB0-ACF8-CC59E1F3DB5F", webAdminUsers["ibm-ace-dashboard-viewer"]) + }) + + t.Run("readWebUsersTxt fails to read files", func(t *testing.T) { + oldReadWebUsersTxt := readWebUsersTxt + readWebUsersTxt = func(logger logger.LoggerInterface, filename string) (string, string, error) { + return "", "", errors.New("Error reading WebAdmin users txt file") + } + _, err := processWebAdminUsers(log, "testdata/initial-config/webusers") + assert.Error(t, err) + assert.Equal(t, "Error reading WebAdmin users txt file", err.Error()) + + readWebUsersTxt = oldReadWebUsersTxt + }) + +} + +func Test_applyFileAuthOverrides(t *testing.T) { + log, _ := logger.NewLogger(os.Stdout, true, false, "testloger") + usersMap := map[string]string{ + "ibm-ace-dashboard-admin": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + "ibm-ace-dashboard-operator": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + "ibm-ace-dashboard-editor": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + "ibm-ace-dashboard-audit": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + "ibm-ace-dashboard-viewer": "12AB0C96-E155-43FA-BA03-BD93AA2166E0", + } + t.Run("Golden path - server.conf.yaml is populated as expected", func(t *testing.T) { + // Pass in server.conf.yaml with some fields populated to prove we don't remove existing overrides + servConf := 
&ServerConfWithoutSecurity{} + servConf.RestAdminListener.AuthorizationEnabled = true + servConfByte, err := yaml.Marshal(servConf) + assert.NoError(t, err) + + serverConfContent, err := applyFileAuthOverrides(log, usersMap, servConfByte) + assert.NoError(t, err) + + serverconfMap := make(map[interface{}]interface{}) + err = yaml.Unmarshal(serverConfContent, &serverconfMap) + assert.NoError(t, err) + // This struct has the security tab so that it can parse all the information out to checked in assertions + var serverConfWithSecurity ServerConf + err = yaml.Unmarshal(serverConfContent, &serverConfWithSecurity) + if err != nil { + t.Log(err) + t.Fail() + + } + + assert.Equal(t, "read+:write+:execute+", serverConfWithSecurity.Security.DataPermissions.Admin) + assert.Equal(t, "read+:write+:execute+", serverConfWithSecurity.Security.Permissions.Admin) + + assert.Equal(t, "read+:write-:execute+", serverConfWithSecurity.Security.Permissions.Operator) + assert.Equal(t, "read+:write+:execute-", serverConfWithSecurity.Security.Permissions.Editor) + assert.Equal(t, "read+:write-:execute-", serverConfWithSecurity.Security.Permissions.Audit) + assert.Equal(t, "read+:write-:execute-", serverConfWithSecurity.Security.Permissions.Viewer) + + }) + t.Run("server.conf.yaml has a Security entry but no entry for DataPermissions or Permissions - to prove we still change permissions if security exists in yaml", func(t *testing.T) { + + servConf := &ServerConfNoPermissions{} + servConf.Security.LdapAuthorizeAttributeToRoleMap.RandomField = "randomstring" + servConfByte, err := yaml.Marshal(servConf) + assert.NoError(t, err) + + serverConfContent, err := applyFileAuthOverrides(log, usersMap, servConfByte) + assert.NoError(t, err) + + serverconfMap := make(map[interface{}]interface{}) + err = yaml.Unmarshal(serverConfContent, &serverconfMap) + assert.NoError(t, err) + + // If the Permissions or DataPermissions do not exist we create them and therefore the below struct is to parse them into 
to make the `assert.Equal` checks easy + var serverConfWithSecurity ServerConf + err = yaml.Unmarshal(serverConfContent, &serverConfWithSecurity) + if err != nil { + t.Log(err) + t.Fail() + } + + assert.Equal(t, "read+:write+:execute+", serverConfWithSecurity.Security.DataPermissions.Admin) + assert.Equal(t, "read+:write+:execute+", serverConfWithSecurity.Security.Permissions.Admin) + + assert.Equal(t, "read+:write-:execute+", serverConfWithSecurity.Security.Permissions.Operator) + assert.Equal(t, "read+:write+:execute-", serverConfWithSecurity.Security.Permissions.Editor) + assert.Equal(t, "read+:write-:execute-", serverConfWithSecurity.Security.Permissions.Audit) + assert.Equal(t, "read+:write-:execute-", serverConfWithSecurity.Security.Permissions.Viewer) + + }) + + t.Run("Unable to unmarhsall server conf into map for parsing", func(t *testing.T) { + oldUnmarshal := unmarshal + unmarshal = func(in []byte, out interface{}) (err error) { + return errors.New("Unable to unmarshall server conf") + } + + _, err := applyFileAuthOverrides(log, usersMap, []byte{}) + assert.Error(t, err) + assert.Equal(t, "Unable to unmarshall server conf", err.Error()) + unmarshal = oldUnmarshal + }) + + t.Run("Unable to marshall server conf after processing", func(t *testing.T) { + oldMarshal := marshal + marshal = func(in interface{}) (out []byte, err error) { + return nil, errors.New("Unable to marshall server conf") + } + _, err := applyFileAuthOverrides(log, usersMap, []byte{}) + assert.Error(t, err) + assert.Equal(t, "Unable to marshall server conf", err.Error()) + marshal = oldMarshal + }) +} +func Test_KeyGen(t *testing.T) { + // Result is of the format ALGORITHM:SALT:ENCRYPTED-PASSWORD + result := keyGen("afc6dd77-ee58-4a51-8ecd-26f55e2ce2fb") + splitResult := strings.Split(result, ":") + + // Decode salt + decodedString, err := b64.StdEncoding.DecodeString(splitResult[1]) + if err != nil { + t.Log(err) + t.Fail() + } + + // Decode password + decodedPasswordString, err := 
b64.StdEncoding.DecodeString(splitResult[2]) + if err != nil { + t.Log(err) + t.Fail() + } + assert.Equal(t, "PBKDF2-SHA-512", splitResult[0]) + assert.Equal(t, 16, len(decodedString)) + assert.Equal(t, 64, len(decodedPasswordString)) +} +func Test_readServerConfFile(t *testing.T) { + readFile = func(name string) ([]byte, error) { + serverConfContent := []byte("this is a fake server.conf.yaml file") + return serverConfContent, nil + } + serverConfContent, err := readServerConfFile() + assert.NoError(t, err) + assert.Equal(t, "this is a fake server.conf.yaml file", string(serverConfContent)) +} + +func Test_writeServerConfFile(t *testing.T) { + writeFile = func(name string, data []byte, perm os.FileMode) error { + return nil + } + serverConfContent := []byte{} + err := writeServerConfFile(serverConfContent) + assert.NoError(t, err) +} + +func Test_outputFiles(t *testing.T) { + log, _ := logger.NewLogger(os.Stdout, true, false, "testloger") + m := map[string]string{ + "password": "password1234", + "role": "ibm-ace-dashboard-admin", + "version": "12.0.0.0", + } + t.Run("Golden path scenario - Outputting all files is successful and we get a nil error", func(t *testing.T) { + oldmkdirAll := mkdirAll + oldwriteFile := writeFile + mkdirAll = func(path string, perm os.FileMode) error { + return nil + } + writeFile = func(name string, data []byte, perm os.FileMode) error { + /* + This UT will fail if the code to trim 'ibm-ace-dashboard-' from the contents of the 'role' file gets removed. + This contents gets read in dfrom users.txt file e.g. admin-users.txt with 'ibm-ace-dashboard- PASSWORD' as the format. + Example - 'ibm-ace-dashboard-admin 08FDD35A-6EA0-4D48-A87D-E6373D414824' + We need to trim the 'ibm-ace-dashboard-admin' down to 'admin' as that is the role that is used in the server.conf.yaml overrides. + */ + if strings.Contains(name, "role") { + if strings.Contains(string(data), "ibm-ace-dashboard-") { + t.Log("writeFile should be called with only the role e.g. 
'admin' and not 'ibm-ace-dashboard-admin'") + t.Fail() + } + } + return nil + } + err := outputFiles(log, m) + assert.NoError(t, err) + mkdirAll = oldmkdirAll + writeFile = oldwriteFile + }) + + t.Run("mkdirAll fails to create the directories for WebAdmin users", func(t *testing.T) { + oldmkdirAll := mkdirAll + mkdirAll = func(path string, perm os.FileMode) error { + return errors.New("mkdirAll fails to create WebAdmin users text") + } + outputFiles(log, m) + mkdirAll = oldmkdirAll + }) + + t.Run("Writing the file to disk fails", func(t *testing.T) { + oldmkdirAll := mkdirAll + mkdirAll = func(path string, perm os.FileMode) error { + return nil + } + oldwriteFile := writeFile + writeFile = func(name string, data []byte, perm os.FileMode) error { + return errors.New("Unable to write files to disk") + } + err := outputFiles(log, m) + assert.Error(t, err) + mkdirAll = oldmkdirAll + writeFile = oldwriteFile + }) + +} +func Test_b64EncodeString(t *testing.T) { + type args struct { + data []byte + } + tests := []struct { + name string + args args + want string + }{ + { + name: "Base64 the following text - hello", + args: args{data: []byte("hello")}, + want: "aGVsbG8=", + }, + { + name: "Base64 the following text - randomtext", + args: args{data: []byte("randomtext")}, + want: "cmFuZG9tdGV4dA==", + }, + { + name: "Base64 the following text - afc6dd77-ee58-4a51-8ecd-26f55e2ce2fb", + args: args{data: []byte("afc6dd77-ee58-4a51-8ecd-26f55e2ce2fb")}, + want: "YWZjNmRkNzctZWU1OC00YTUxLThlY2QtMjZmNTVlMmNlMmZi", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := b64EncodeString(tt.args.data); got != tt.want { + t.Errorf("b64EncodeString() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_readWebUsersTxt(t *testing.T) { + log, _ := logger.NewLogger(os.Stdout, true, false, "testloger") + t.Run("readWebUsersTxt success scenario - returns the username and password with nil error", func(t *testing.T) { + readFile = func(name string) 
([]byte, error) { + return []byte("ibm-ace-dashboard-admin afc6dd77-ee58-4a51-8ecd-26f55e2ce2f"), nil + } + + username, password, err := readWebUsersTxt(log, "admin-users.txt") + assert.NoError(t, err) + assert.Equal(t, "ibm-ace-dashboard-admin", username) + assert.Equal(t, "afc6dd77-ee58-4a51-8ecd-26f55e2ce2f", password) + + }) + t.Run("readWebUsersTxt failure scenario - returns empty username and password with error", func(t *testing.T) { + readFile = func(name string) ([]byte, error) { + return []byte{}, errors.New("Error reading file") + } + + username, password, err := readWebUsersTxt(log, "admin-users.txt") + assert.Equal(t, "", username) + assert.Equal(t, "", password) + assert.Error(t, err) + }) +} diff --git a/ubi/Dockerfile-legacy.aceonly b/ubi/Dockerfile-legacy.aceonly new file mode 100644 index 0000000..3cb85cf --- /dev/null +++ b/ubi/Dockerfile-legacy.aceonly @@ -0,0 +1,118 @@ +FROM golang:latest as builder + +WORKDIR /go/src/github.com/ot4i/ace-docker/ + +COPY go.mod . +COPY go.sum . +RUN go mod download + +COPY cmd/ ./cmd +COPY internal/ ./internal +COPY common/ ./common +RUN go version +RUN go build -ldflags "-X \"main.ImageCreated=$(date --iso-8601=seconds)\"" ./cmd/runaceserver/ +RUN go build ./cmd/chkaceready/ +RUN go build ./cmd/chkacehealthy/ + +# Run all unit tests +RUN go test -v ./cmd/runaceserver/ +RUN go test -v ./internal/... +RUN go test -v ./common/... +RUN go vet ./cmd/... ./internal/... ./common/... + +ARG ACE_INSTALL=ace-12.0.1.0.tar.gz +ARG IFIX_LIST="" +WORKDIR /opt/ibm +COPY deps/$ACE_INSTALL . 
+COPY ./ApplyIFixes.sh /opt/ibm +RUN mkdir ace-12 +RUN tar -xzf $ACE_INSTALL --absolute-names --exclude ace-12.\*/tools --exclude ace-12.\*/server/bin/TADataCollector.sh --exclude ace-12.\*/server/transformationAdvisor/ta-plugin-ace.jar --strip-components 1 --directory /opt/ibm/ace-12 \ + && ./ApplyIFixes.sh $IFIX_LIST \ + && rm ./ApplyIFixes.sh + +FROM registry.access.redhat.com/ubi8/ubi-minimal + +ENV SUMMARY="Integration Server for App Connect Enterprise" \ + DESCRIPTION="Integration Server for App Connect Enterprise" \ + PRODNAME="AppConnectEnterprise" \ + COMPNAME="IntegrationServer" + +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Integration Server for App Connect Enterprise" \ + io.openshift.tags="$PRODNAME,$COMPNAME" \ + com.redhat.component="$PRODNAME-$COMPNAME" \ + name="$PRODNAME/$COMPNAME" \ + vendor="IBM" \ + version="REPLACE_VERSION" \ + release="REPLACE_RELEASE" \ + license="IBM" \ + maintainer="Hybrid Integration Platform Cloud" \ + io.openshift.expose-services="" \ + usage="" + +# Add required license as text file in Liceses directory (GPL, MIT, APACHE, Partner End User Agreement, etc) +COPY /licenses/ /licenses/ + +RUN microdnf update && microdnf install findutils util-linux unzip python3 tar procps openssl && microdnf clean all \ + && ln -s /usr/bin/python3 /usr/local/bin/python \ + && mkdir /etc/ACEOpenTracing /opt/ACEOpenTracing /var/log/ACEOpenTracing && chmod 777 /var/log/ACEOpenTracing /etc/ACEOpenTracing + +# Force reinstall tzdata package to get zoneinfo files +RUN microdnf reinstall tzdata -y + +# Create OpenTracing directories, update permissions and copy in any library or configuration files needed +COPY deps/OpenTracing/library/* ./opt/ACEOpenTracing/ +COPY deps/OpenTracing/config/* ./etc/ACEOpenTracing/ + +WORKDIR /opt/ibm + +COPY --from=builder /opt/ibm/ace-12 /opt/ibm/ace-12 + +# Copy in PID1 process +COPY --from=builder 
/go/src/github.com/ot4i/ace-docker/runaceserver /usr/local/bin/ +COPY --from=builder /go/src/github.com/ot4i/ace-docker/chkace* /usr/local/bin/ + +# Copy in script files +COPY *.sh /usr/local/bin/ + +# Install kubernetes cli +COPY ubi/install-kubectl.sh /usr/local/bin/ +RUN chmod u+x /usr/local/bin/install-kubectl.sh \ + && install-kubectl.sh + +COPY ubi/generic_invalid/invalid_license.msgflow /home/aceuser/temp/gen +COPY ubi/generic_invalid/InvalidLicenseJava.jar /home/aceuser/temp/gen +COPY ubi/generic_invalid/application.descriptor /home/aceuser/temp + +# Create a user to run as, create the ace workdir, and chmod script files +RUN /opt/ibm/ace-12/ace make registry global accept license silently \ + && useradd -u 1000 -d /home/aceuser -G mqbrkrs,wheel aceuser \ + && mkdir -p /var/mqsi \ + && mkdir -p /home/aceuser/initial-config \ + && su - -c '. /opt/ibm/ace-12/server/bin/mqsiprofile && mqsicreateworkdir /home/aceuser/ace-server' \ + && chmod -R 777 /home/aceuser \ + && chmod -R 777 /var/mqsi \ + && su - -c '. /opt/ibm/ace-12/server/bin/mqsiprofile && echo $MQSI_JREPATH && chmod g+w $MQSI_JREPATH/lib/security/cacerts' \ + && chmod -R 777 /home/aceuser/temp \ + && chmod 777 /opt/ibm/ace-12/server/ODBC/dsdriver/odbc_cli/clidriver/license + +COPY git.commit /home/aceuser/ + +# Set BASH_ENV to source mqsiprofile when using docker exec bash -c +ENV BASH_ENV=/usr/local/bin/ace_env.sh + +# Expose ports. 7600, 7800, 7843 for ACE; 9483 for ACE metrics +EXPOSE 7600 7800 7843 9483 + +WORKDIR /home/aceuser + +ENV LOG_FORMAT=basic + +# Set user to prevent container running as root by default +USER 1000 + +# Set entrypoint to run management script + +ENTRYPOINT ["runaceserver"]