Sha256: 55bb58adda4f55f6e2489787aab69d9555c4d0fe0bbae308592d3a59b41ff5bc

Contents?: true

Size: 1.34 KB

Versions: 1

Compression:

Stored size: 1.34 KB

Contents

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>
  <name>dfs.replication</name>
  <value><%= @node[:poolparty][:block_replication_level] || 3 %></value>
  <description>Default block replication.
  The actual number of replications can be specified when the file is created.
  The default is used if replication is not specified at create time.
  </description>
</property>

<property>
  <name>dfs.name.dir</name>
  <value><%= @node[:poolparty][:hadoop_data_dir] %>/dfs/name</value>
</property>

<property>
  <name>dfs.data.dir</name>
  <value><%= @node[:poolparty][:hadoop_data_dir] %>/dfs/data</value>
</property>

<!--
<property>
  <name>dfs.hosts</name>
  <value>/usr/local/hadoop/conf/hosts</value>
  <description>
    When we have a number of users connecting to the hadoop cloud, they may
    accidentally run "start-all.sh" and then their local machine will try to
    connect to the cluster. We limit the machines that can be part of the
    HDFS to only those listed in the slaves file. See
    http://www.cloudera.com/blog/2008/12/03/securing-a-hadoop-cluster-through-a-gateway/
  </description>
</property>
-->

<!-- slave.host.name -->

<property>
  <name>dfs.permissions</name>
  <value>true</value>
</property>

</configuration>
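
The <%= ... %> tags are ERB: the template expects a PoolParty node-attribute structure exposed as @node, and falls back to a replication level of 3 when :block_replication_level is unset. Below is a minimal rendering sketch under that assumption; the TemplateContext class, its render helper, and the /mnt/hadoop data directory are illustrative, not part of the PoolParty API.

require 'erb'

# Hypothetical rendering context: exposes a node-attribute hash to the
# template as @node, which is what the <%= @node[:poolparty][...] %> tags expect.
class TemplateContext
  def initialize(node)
    @node = node
  end

  def render(template_path)
    ERB.new(File.read(template_path)).result(binding)
  end
end

# Example attribute values; :block_replication_level is omitted on purpose,
# so the template's `|| 3` fallback sets dfs.replication to 3.
node = {
  poolparty: {
    hadoop_data_dir: '/mnt/hadoop'   # illustrative path, not a PoolParty default
  }
}

puts TemplateContext.new(node).render('hdfs-site.xml.erb')
# => dfs.replication is 3, dfs.name.dir is /mnt/hadoop/dfs/name,
#    dfs.data.dir is /mnt/hadoop/dfs/data

In the gem itself, the binding that supplies @node is provided by the PoolParty/chef provisioning machinery rather than a hand-rolled context like the one above.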

Version data entries

1 entry across 1 version & 1 rubygem

Version Path
auser-poolparty-extensions-0.1.1 lib/extensions/hadoop/templates/hdfs-site.xml.erb