###############################
# Installation type section
###############################

#New Zone mode is set when existing.zone.domain is unspecified.

#An existing fusion-ui-server address for adding to an Existing Zone type installation.
# Remove or leave unset to create in a New Zone.
# Required for adding to an Existing Zone.
existing.zone.domain=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The existing fusion-ui-server port for adding to an Existing Zone type installation.
# Only has an effect if existing.zone.domain is set.
# Optional, defaulting to 8083
# existing.zone.port=8083

###############################
# License section
###############################

#The path to a valid fusion license file.
# Required for creating a New Zone.
# Ignored for Existing Zone installations (the license is downloaded from the remote node)
license.file.path=

###############################
# Server section
###############################

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The fusion server umask. For example "server.umask=0022".
# Optional, defaults to 0022 when commented out.
# server.umask=

#The Maximum Java Heap Size in GB for the fusion server.
# REQUIRED. Must be 1 or greater. Recommended minimum is 16.
server.java.heap.max=

#The Maximum Java Heap Size in GB for the fusion ihc server.
# REQUIRED. Must be 1 or greater. Recommended minimum is 16.
ihc.server.java.heap.max=

###############################
# Zone section
###############################

#The Fully Qualified Domain Name of the fusion server being added; REQUIRED.
fusion.domain=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The Fully Qualified Domain Name of the fusion ihc server
# Optional, takes the value of fusion.domain if not specified
# fusion.ihc.domain=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The Fully Qualified Domain Name of the bind address of the ihc server.
# Optional, takes the value of 0.0.0.0 if not specified
# fusion.ihc.bind.address=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The Node Name for the fusion server
# Optional (generated from the fusion.domain setting if not provided).
# fusion.server.node.name=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The dcone port for the server.
# Optional, defaulting to 6444
# fusion.server.dcone.port=6444

#The name of the zone being created
# REQUIRED for new zone creation.
# Ignored for Existing Zone type installations.
fusion.server.zone.name=

#The fusion server location name
# Optional.
# fusion.server.location.name=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The HTTP policy the Fusion server will use
# Optional - default HTTP_ONLY
# fusion.http.policy=HTTP_ONLY

#The port the Fusion HTTP server will use
# Optional - only used if fusion.http.policy=HTTP_ONLY
# jetty.http.port=8082

#The port the Fusion HTTPS server will use
# Optional - only used if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# jetty.https.port=8084
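
# Example (illustrative values only): a minimal set of Server and Zone settings for a New Zone install
# might look like the following. The license path, heap sizes, hostname and zone name below are
# placeholders; substitute values for your own environment.
#
#   license.file.path=/tmp/license.key
#   server.java.heap.max=16
#   ihc.server.java.heap.max=16
#   fusion.domain=fusion01.example.com
#   fusion.server.zone.name=zone1
#   fusion.http.policy=HTTP_ONLY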
#The SSL keystore path
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# If the keystore path is required, it must also refer to an existing and readable file.
# ssl.keystore=

#The SSL keystore password
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# ssl.keystore.password=

#The SSL key alias (the assigned alias for the key pair)
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# ssl.key.alias=

#The SSL key password
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# ssl.key.password=

#The SSL truststore path
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# The truststore path MAY be the same as the path specified for ssl.keystore
# If the truststore path is provided, it must refer to an existing and readable file.
# ssl.truststore=

#The SSL truststore password
# REQUIRED if fusion.http.policy=HTTPS_ONLY or fusion.http.policy=BOTH_HTTP_HTTPS
# ssl.truststore.password=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#Advanced Option: URI scheme and fs type
# Valid options: hdfsWithHdfs, fusionWithHcfs, fusionWithHdfs, fusionAndHdfsWithHdfs
# Optional, defaults to hdfsWithHdfs
# fusion.scheme.and.fs=hdfsWithHdfs

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# Number of bytes the client will write before sending a push request to the
# fusion server indicating bytes are available for transfer.
# Optional, default is set by your underlying file system.
# fs.fusion.push.threshold=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# Size of the 'chunks' in kilobytes used in a file transfer.
# Optional, default is 4096
# transfer.chunk.size=

###############################
# Management Endpoint section
###############################

#The type of Management Endpoint.
# Valid options: AMBARI, CLOUDERA, MAPR, UNMANAGED_LOCAL_FS, UNMANAGED_ONTAP
# REQUIRED for New Zone installations.
# Ignored for Existing Zone type installations.
management.endpoint.type=

#Set to true if there is no Management endpoint
# and the system is to be used for local FileSystem access only
# localfs.installation.mode=

#Set to true if there is no Management endpoint
# and the system is to be used for local FileSystem access with OnTap/Netapp support only
# ontap.localfs.installation.mode=

#The Fully Qualified Domain Name for the Management endpoint
# REQUIRED for New Zone installations.
# Ignored for Existing Zone type installations.
management.endpoint.domain=

#The port for the Management endpoint.
# REQUIRED for New Zone installations.
# Ignored for Existing Zone type installations.
management.endpoint.port=

#The username for the Management endpoint; should be an existing administrative user within the Cluster Manager.
# REQUIRED for all installations.
management.endpoint.username=

#The password for the management.endpoint.username user.
# REQUIRED for all installations.
management.endpoint.password=

# The username of the local user
# Only REQUIRED if it is a local FileSystem or OnTap/Netapp installation
# local.user.username=

# The password for the local.user.username user
# Only REQUIRED if it is a local FileSystem or OnTap/Netapp installation
# local.user.password=
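
# Example (illustrative values only): Management Endpoint settings for a cluster managed by
# Cloudera Manager might look like the following. The hostname, port and credentials below are
# placeholders for your own Cluster Manager.
#
#   management.endpoint.type=CLOUDERA
#   management.endpoint.domain=manager01.example.com
#   management.endpoint.port=7180
#   management.endpoint.username=admin
#   management.endpoint.password=changeme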
######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#A flag to indicate whether the Management endpoint requires the use of ssl.
# Valid options: True, False.
# Optional, defaults to False when commented out. Use "management.endpoint.uses.ssl=true" to specify use of ssl.
# management.endpoint.uses.ssl=

###############################
# Kerberos Settings section
###############################

# The installer will automatically detect whether kerberos is enabled on the cluster or not.
# For a kerberized cluster all three pieces of information are required.

#The file path on the server being installed on to specify the krb5.conf kerberos config file.
# Required if kerberos is enabled on the cluster.
kerberos.config.path=

#The file path on the server being set up to specify a keytab file used for validation.
# Required if kerberos is enabled on the cluster.
kerberos.keytab.path=

#The kerberos principal to use.
# Required if kerberos is enabled on the cluster.
kerberos.principal=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#A flag to indicate whether Fusion UI->Fusion Server kerberos is enabled.
# Valid options: true, false.
# Optional, defaults to false when commented out. Use "fusion.kerberos.enabled=true" to enable Fusion UI->Fusion Server kerberos.
# Ignored if kerberos is not enabled on the cluster.
# fusion.kerberos.enabled=

#The file path on the server being set up to specify a keytab file used for validation for Fusion UI->Fusion Server kerberos.
# Required if both kerberos is enabled on the cluster and fusion.kerberos.enabled=true.
fusion.kerberos.keytab.path=

#The kerberos principal to use for Fusion UI->Fusion Server kerberos.
# Must be the same authentication principal as used by hadoop (i.e. the value of hadoop.http.authentication.kerberos.principal)
# For more information, see the documentation at:
# https://hadoop.apache.org/docs//hadoop-project-dist/hadoop-common/HttpAuthentication.html (replacing the empty path segment with your Hadoop version)
# Required if both kerberos is enabled on the cluster and fusion.kerberos.enabled=true.
fusion.kerberos.principal=

#The handshake token storage location for Kerberos that is used to verify that the user has the proper Kerberos credentials to write to the underlying file system.
# Ignored if kerberos is not enabled on the cluster.
# Optional if kerberos is enabled on the cluster.
fusion.handshakeToken.dir=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#A flag to indicate whether Fusion UI Authorization for DCone actions is enabled.
# Valid options: true, false.
# Optional, defaulting to false when commented out. Use "kerberos.authz.enabled=true" to enable authorization.
# Ignored if kerberos is not enabled on the cluster.
# Ignored if fusion.kerberos.enabled=false
# kerberos.authz.enabled=

###############################
# Induction section
###############################

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#A flag to allow the skipping of induction when this is the first fusion server being created.
# Valid options: true, false.
# Optional, defaults to false when commented out. Use "induction.skip=true" to skip induction.
# induction.skip=

#The fully qualified domain name of an existing fusion server to use for induction.
# Required if induction.skip=false
induction.remote.node=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The port of the remote fusion server chosen for induction
# Only has an effect if induction.skip=false
# Optional, defaulting to 8082
# induction.remote.port=8082
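
# Example (illustrative values only): for the very first fusion server in a deployment, induction
# can be skipped:
#
#   induction.skip=true
#
# When joining an existing deployment, point at a node that is already installed (the hostname
# below is a placeholder):
#
#   induction.skip=false
#   induction.remote.node=fusion01.example.com
#   induction.remote.port=8082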
#The following three induction options should *only* be provided where the chosen induction node is already protected by kerberos
# Under these circumstances all three pieces of information are required.

#The host id of the remote fusion server chosen for induction
# Required when the chosen induction node is already protected by kerberos
induction.remote.node.id=

#The location id of the remote fusion server chosen for induction
# Required when the chosen induction node is already protected by kerberos
induction.remote.location=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The DCone port of the remote fusion server chosen for induction
# Defaults to 6444
# induction.remote.dcone.port=6444

###############################
# Advanced Options section
###############################

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The fully qualified domain name of the Fusion UI Server.
# This is used to change the domain name from the initial value set during install, which is normally 0.0.0.0
# Optional, leaving the initial setting unchanged when commented out.
# fusion.ui.domain=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The port number for the Fusion UI Server.
# This is used to change the port number from the initial value set during install, which is normally 8083
# Optional, leaving the existing setting unchanged when commented out.
# fusion.ui.port=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The property that determines if the application enters a panic state when the dcone database is marked as dirty
# The dcone database would be marked as dirty if the system suffers a hard crash
# Valid options: true, false. Defaults to false when commented out
# dcone.system.db.panic.if.dirty=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
#The property that determines if the application enters a panic state when the application database is marked as dirty
# The application database would be marked as dirty if the system suffers a hard crash
# Valid options: true, false. Defaults to false when commented out
# application.integration.db.panic.if.dirty=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# The property that determines how long to wait between checks that HDFS has restarted correctly in Cloudera/Ambari
# Used in four places: check stop job has finished, check status is INSTALLED, check start job has finished, check status is STARTED
# Interval is in seconds.
# Default is 30 seconds.
# hdfs.restart.interval.in.seconds=30

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# The property that determines how many times we try to check that the HDFS service has restarted in Cloudera/Ambari
# Used in four places: check stop job has finished, check status is INSTALLED, check start job has finished, check status is STARTED
# Default is 40 times.
# hdfs.restart.max.retries=40

###############################
# Plugin Configuration section
###############################
#
# Currently supported plugins:
# - Hive Metastore Replication
#

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# The property that determines the host on which the Wandisco Metastore server will be installed
# Default is the hostname of the Fusion Server install host.
# plugin.hive.hostname=

######### THIS OPTION DEFAULTS WHEN COMMENTED OUT ###########
# The port on which the Wandisco Metastore server will listen
# Optional, defaulting to 9084
# plugin.hive.port=9084
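
# Example (illustrative values only): to place the Wandisco Metastore server for Hive Metastore
# Replication on a host other than the Fusion Server install host, the plugin settings might look
# like the following. The hostname below is a placeholder.
#
#   plugin.hive.hostname=hive01.example.com
#   plugin.hive.port=9084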