# The only required environment variable is JAVA_HOME.  All others are
# optional.  When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use.  Required.
export JAVA_HOME=/usr/java/jdk1.6.0_22
export HADOOP_HOME=/usr/local/hadoop-0.20.2

# Extra Java CLASSPATH elements.  Optional.
export HADOOP_CLASSPATH=${HADOOP_HOME}/hadoop_unit.jar

# The maximum amount of heap to use, in MB. Default is 1000.
# export HADOOP_HEAPSIZE=2000

# Extra Java runtime options.  Empty by default.
# export HADOOP_OPTS=-server

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
# export HADOOP_TASKTRACKER_OPTS=

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
# export HADOOP_CLIENT_OPTS

# Extra ssh options.  Empty by default.
# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"

# Where log files are stored.  $HADOOP_HOME/logs by default.
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs

# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves

# host:path where hadoop code should be rsync'd from.  Unset by default.
# export HADOOP_MASTER=master:/home/$USER/src/hadoop

# Seconds to sleep between slave commands.  Unset by default.  This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HADOOP_SLAVE_SLEEP=0.1

# The directory where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/var/hadoop/pids

# A string representing this instance of hadoop. $USER by default.
# export HADOOP_IDENT_STRING=$USER

# The scheduling priority for daemon processes.  See 'man nice'.
# export HADOOP_NICENESS=10