Imported Upstream version 1.5.1

This commit is contained in:
Mario Fetka
2020-09-22 02:25:22 +02:00
commit 434d6067d9
2103 changed files with 928962 additions and 0 deletions

View File

@@ -0,0 +1,85 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- You may freely edit this file. See commented blocks below for -->
<!-- some examples of how to customize the build. -->
<!-- (If you delete it and reopen the project it will be recreated.) -->
<!-- By default, only the Clean and Build commands use this build script. -->
<!-- Commands such as Run, Debug, and Test only use this build script if -->
<!-- the Compile on Save feature is turned off for the project. -->
<!-- You can turn off the Compile on Save (or Deploy on Save) setting -->
<!-- in the project's Project Properties dialog box.-->
<project name="XtreemFS" default="default" basedir=".">
<description>Builds, tests, and runs the project XtreemFS.</description>
<import file="nbproject/build-impl-1.6.5.xml"/>
<!--
There exist several targets which are by default empty and which can be
used for execution of your tasks. These targets are usually executed
before and after some main targets. They are:
-pre-init: called before initialization of project properties
-post-init: called after initialization of project properties
-pre-compile: called before javac compilation
-post-compile: called after javac compilation
-pre-compile-single: called before javac compilation of single file
-post-compile-single: called after javac compilation of single file
-pre-compile-test: called before javac compilation of JUnit tests
-post-compile-test: called after javac compilation of JUnit tests
-pre-compile-test-single: called before javac compilation of single JUnit test
-post-compile-test-single: called after javac compilation of single JUnit test
-pre-jar: called before JAR building
-post-jar: called after JAR building
-post-clean: called after cleaning build products
(Targets beginning with '-' are not intended to be called on their own.)
Example of inserting an obfuscator after compilation could look like this:
<target name="-post-compile">
<obfuscate>
<fileset dir="${build.classes.dir}"/>
</obfuscate>
</target>
For list of available properties check the imported
nbproject/build-impl-1.6.5.xml file.
Another way to customize the build is by overriding existing main targets.
The targets of interest are:
-init-macrodef-javac: defines macro for javac compilation
-init-macrodef-junit: defines macro for junit execution
-init-macrodef-debug: defines macro for class debugging
-init-macrodef-java: defines macro for class execution
-do-jar-with-manifest: JAR building (if you are using a manifest)
-do-jar-without-manifest: JAR building (if you are not using a manifest)
run: execution of project
-javadoc-build: Javadoc generation
test-report: JUnit report generation
An example of overriding the target for project execution could look like this:
<target name="run" depends="XtreemFS-impl.jar">
<exec dir="bin" executable="launcher.exe">
<arg file="${dist.jar}"/>
</exec>
</target>
Notice that the overridden target depends on the jar target and not only on
the compile target as the regular run target does. Again, for a list of available
properties which you can use, check the target you are overriding in the
nbproject/build-impl-1.6.5.xml file.
-->
<target name="xtreemfs-tests-jar" depends="compile-test">
<echo level="info">Creating XtreemFS-tests.jar.</echo>
<!-- Create a new JAR. -->
<jar destfile="dist/XtreemFS-tests.jar" basedir="build/test/classes">
<include name="org/xtreemfs/servers/test/**/*.class"/>
<zipfileset dir="../../" includes="LICENSE" fullpath="/LICENSE"/>
</jar>
</target>
</project>

View File

@@ -0,0 +1,74 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- You may freely edit this file. See commented blocks below for -->
<!-- some examples of how to customize the build. -->
<!-- (If you delete it and reopen the project it will be recreated.) -->
<!-- By default, only the Clean and Build commands use this build script. -->
<!-- Commands such as Run, Debug, and Test only use this build script if -->
<!-- the Compile on Save feature is turned off for the project. -->
<!-- You can turn off the Compile on Save (or Deploy on Save) setting -->
<!-- in the project's Project Properties dialog box.-->
<project name="XtreemFS" default="default" basedir=".">
<description>Builds, tests, and runs the project XtreemFS.</description>
<import file="nbproject/build-impl.xml"/>
<!--
There exist several targets which are by default empty and which can be
used for execution of your tasks. These targets are usually executed
before and after some main targets. They are:
-pre-init: called before initialization of project properties
-post-init: called after initialization of project properties
-pre-compile: called before javac compilation
-post-compile: called after javac compilation
-pre-compile-single: called before javac compilation of single file
-post-compile-single: called after javac compilation of single file
-pre-compile-test: called before javac compilation of JUnit tests
-post-compile-test: called after javac compilation of JUnit tests
-pre-compile-test-single: called before javac compilation of single JUnit test
-post-compile-test-single: called after javac compilation of single JUnit test
-pre-jar: called before JAR building
-post-jar: called after JAR building
-post-clean: called after cleaning build products
(Targets beginning with '-' are not intended to be called on their own.)
Example of inserting an obfuscator after compilation could look like this:
<target name="-post-compile">
<obfuscate>
<fileset dir="${build.classes.dir}"/>
</obfuscate>
</target>
For list of available properties check the imported
nbproject/build-impl.xml file.
Another way to customize the build is by overriding existing main targets.
The targets of interest are:
-init-macrodef-javac: defines macro for javac compilation
-init-macrodef-junit: defines macro for junit execution
-init-macrodef-debug: defines macro for class debugging
-init-macrodef-java: defines macro for class execution
-do-jar-with-manifest: JAR building (if you are using a manifest)
-do-jar-without-manifest: JAR building (if you are not using a manifest)
run: execution of project
-javadoc-build: Javadoc generation
test-report: JUnit report generation
An example of overriding the target for project execution could look like this:
<target name="run" depends="XtreemFS-impl.jar">
<exec dir="bin" executable="launcher.exe">
<arg file="${dist.jar}"/>
</exec>
</target>
Notice that the overridden target depends on the jar target and not only on
the compile target as the regular run target does. Again, for a list of available
properties which you can use, check the target you are overriding in the
nbproject/build-impl.xml file.
-->
</project>

85
java/servers/build.xml Normal file
View File

@@ -0,0 +1,85 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- You may freely edit this file. See commented blocks below for -->
<!-- some examples of how to customize the build. -->
<!-- (If you delete it and reopen the project it will be recreated.) -->
<!-- By default, only the Clean and Build commands use this build script. -->
<!-- Commands such as Run, Debug, and Test only use this build script if -->
<!-- the Compile on Save feature is turned off for the project. -->
<!-- You can turn off the Compile on Save (or Deploy on Save) setting -->
<!-- in the project's Project Properties dialog box.-->
<project name="XtreemFS" default="default" basedir=".">
<description>Builds, tests, and runs the project XtreemFS.</description>
<import file="nbproject/build-impl.xml"/>
<import file="nbproject/profiler-build-impl.xml"/> <!--
There exist several targets which are by default empty and which can be
used for execution of your tasks. These targets are usually executed
before and after some main targets. They are:
-pre-init: called before initialization of project properties
-post-init: called after initialization of project properties
-pre-compile: called before javac compilation
-post-compile: called after javac compilation
-pre-compile-single: called before javac compilation of single file
-post-compile-single: called after javac compilation of single file
-pre-compile-test: called before javac compilation of JUnit tests
-post-compile-test: called after javac compilation of JUnit tests
-pre-compile-test-single: called before javac compilation of single JUnit test
-post-compile-test-single: called after javac compilation of single JUnit test
-pre-jar: called before JAR building
-post-jar: called after JAR building
-post-clean: called after cleaning build products
(Targets beginning with '-' are not intended to be called on their own.)
Example of inserting an obfuscator after compilation could look like this:
<target name="-post-compile">
<obfuscate>
<fileset dir="${build.classes.dir}"/>
</obfuscate>
</target>
For list of available properties check the imported
nbproject/build-impl.xml file.
Another way to customize the build is by overriding existing main targets.
The targets of interest are:
-init-macrodef-javac: defines macro for javac compilation
-init-macrodef-junit: defines macro for junit execution
-init-macrodef-debug: defines macro for class debugging
-init-macrodef-java: defines macro for class execution
-do-jar-with-manifest: JAR building (if you are using a manifest)
-do-jar-without-manifest: JAR building (if you are not using a manifest)
run: execution of project
-javadoc-build: Javadoc generation
test-report: JUnit report generation
An example of overriding the target for project execution could look like this:
<target name="run" depends="XtreemFS-impl.jar">
<exec dir="bin" executable="launcher.exe">
<arg file="${dist.jar}"/>
</exec>
</target>
Notice that the overridden target depends on the jar target and not only on
the compile target as the regular run target does. Again, for a list of available
properties which you can use, check the target you are overriding in the
nbproject/build-impl.xml file.
-->
<target name="xtreemfs-tests-jar" depends="compile-test">
<echo level="info">Creating XtreemFS-tests.jar.</echo>
<!-- Create a new JAR. -->
<jar destfile="dist/XtreemFS-tests.jar" basedir="build/test/classes">
<include name="org/xtreemfs/**/*.class"/>
<zipfileset dir="../../" includes="LICENSE" fullpath="/LICENSE"/>
</jar>
</target>
</project>

View File

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="test"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="lib" path="../lib/BabuDB.jar"/>
<classpathentry kind="lib" path="../lib/commons-codec-1.3.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-httpclient-3.0.1.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-httpclient-3.0.1-contrib.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-logging-1.1.jar"/>
<classpathentry kind="lib" path="../lib/test/junit-4.11.jar"/>
<classpathentry kind="lib" path="../lib/test/hamcrest-core-1.3.jar"/>
<classpathentry kind="lib" path="../lib/jdmkrt.jar"/>
<classpathentry kind="lib" path="../lib/jdmktk.jar"/>
<classpathentry kind="lib" path="../lib/protobuf-java-2.5.0.jar"/>
<classpathentry kind="src" path="/xtreemfs_foundation"/>
<classpathentry combineaccessrules="false" kind="src" path="/xtreemfs_flease"/>
<classpathentry kind="output" path="build/classes"/>
</classpath>

View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="test"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="lib" path="../lib/commons-codec-1.3.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-httpclient-3.0.1.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-httpclient-3.0.1-contrib.jar"/>
<classpathentry kind="lib" path="../lib/test/commons-logging-1.1.jar"/>
<classpathentry kind="lib" path="../lib/test/junit-4.3.1.jar"/>
<classpathentry kind="lib" path="../lib/jdmkrt.jar"/>
<classpathentry kind="lib" path="../lib/jdmktk.jar"/>
<classpathentry kind="lib" path="../lib/protobuf-java-2.5.0.jar"/>
<classpathentry kind="src" path="/xtreemfs_foundation"/>
<classpathentry combineaccessrules="false" kind="src" path="/BabuDB_trunk"/>
<classpathentry combineaccessrules="false" kind="src" path="/xtreemfs_flease"/>
<classpathentry kind="output" path="build/classes"/>
</classpath>

View File

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>xtreemfs_server</name>
<comment></comment>
<projects>
<project>xtreemfs_foundation</project>
<project>xtreemfs_flease</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View File

@@ -0,0 +1,682 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
*** GENERATED FROM project.xml - DO NOT EDIT ***
*** EDIT ../build.xml INSTEAD ***
For the purpose of easier reading the script
is divided into following sections:
- initialization
- compilation
- jar
- execution
- debugging
- javadoc
- junit compilation
- junit execution
- junit debugging
- applet
- cleanup
-->
<project xmlns:j2seproject1="http://www.netbeans.org/ns/j2se-project/1" xmlns:j2seproject3="http://www.netbeans.org/ns/j2se-project/3" xmlns:jaxrpc="http://www.netbeans.org/ns/j2se-project/jax-rpc" basedir=".." default="default" name="XtreemFS-impl">
<target depends="test,jar,javadoc" description="Build and test whole project." name="default"/>
<!--
======================
INITIALIZATION SECTION
======================
-->
<target name="-pre-init">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="-pre-init" name="-init-private">
<property file="nbproject/private/config.properties"/>
<property file="nbproject/private/configs/${config}.properties"/>
<property file="nbproject/private/private.properties"/>
</target>
<target depends="-pre-init,-init-private" name="-init-user">
<property file="${user.properties.file}"/>
<!-- The two properties below are usually overridden -->
<!-- by the active platform. Just a fallback. -->
<property name="default.javac.source" value="1.4"/>
<property name="default.javac.target" value="1.4"/>
</target>
<target depends="-pre-init,-init-private,-init-user" name="-init-project">
<property file="nbproject/configs/${config}.properties"/>
<property file="nbproject/project.properties"/>
</target>
<target depends="-pre-init,-init-private,-init-user,-init-project,-init-macrodef-property" name="-do-init">
<available file="${manifest.file}" property="manifest.available"/>
<condition property="manifest.available+main.class">
<and>
<isset property="manifest.available"/>
<isset property="main.class"/>
<not>
<equals arg1="${main.class}" arg2="" trim="true"/>
</not>
</and>
</condition>
<condition property="manifest.available+main.class+mkdist.available">
<and>
<istrue value="${manifest.available+main.class}"/>
<isset property="libs.CopyLibs.classpath"/>
</and>
</condition>
<condition property="have.tests">
<or>
<available file="${test.src.dir}"/>
</or>
</condition>
<condition property="have.sources">
<or>
<available file="${src.dir}"/>
</or>
</condition>
<condition property="netbeans.home+have.tests">
<and>
<isset property="netbeans.home"/>
<isset property="have.tests"/>
</and>
</condition>
<condition property="no.javadoc.preview">
<and>
<isset property="javadoc.preview"/>
<isfalse value="${javadoc.preview}"/>
</and>
</condition>
<property name="run.jvmargs" value=""/>
<property name="javac.compilerargs" value=""/>
<property name="work.dir" value="${basedir}"/>
<condition property="no.deps">
<and>
<istrue value="${no.dependencies}"/>
</and>
</condition>
<property name="javac.debug" value="true"/>
<property name="javadoc.preview" value="true"/>
<property name="application.args" value=""/>
<property name="source.encoding" value="${file.encoding}"/>
<condition property="javadoc.encoding.used" value="${javadoc.encoding}">
<and>
<isset property="javadoc.encoding"/>
<not>
<equals arg1="${javadoc.encoding}" arg2=""/>
</not>
</and>
</condition>
<property name="javadoc.encoding.used" value="${source.encoding}"/>
<property name="includes" value="**"/>
<property name="excludes" value=""/>
<property name="do.depend" value="false"/>
<condition property="do.depend.true">
<istrue value="${do.depend}"/>
</condition>
<condition else="" property="javac.compilerargs.jaxws" value="-Djava.endorsed.dirs='${jaxws.endorsed.dir}'">
<and>
<isset property="jaxws.endorsed.dir"/>
<available file="nbproject/jaxws-build.xml"/>
</and>
</condition>
</target>
<target name="-post-init">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="-pre-init,-init-private,-init-user,-init-project,-do-init" name="-init-check">
<fail unless="src.dir">Must set src.dir</fail>
<fail unless="test.src.dir">Must set test.src.dir</fail>
<fail unless="build.dir">Must set build.dir</fail>
<fail unless="dist.dir">Must set dist.dir</fail>
<fail unless="build.classes.dir">Must set build.classes.dir</fail>
<fail unless="dist.javadoc.dir">Must set dist.javadoc.dir</fail>
<fail unless="build.test.classes.dir">Must set build.test.classes.dir</fail>
<fail unless="build.test.results.dir">Must set build.test.results.dir</fail>
<fail unless="build.classes.excludes">Must set build.classes.excludes</fail>
<fail unless="dist.jar">Must set dist.jar</fail>
</target>
<target name="-init-macrodef-property">
<macrodef name="property" uri="http://www.netbeans.org/ns/j2se-project/1">
<attribute name="name"/>
<attribute name="value"/>
<sequential>
<property name="@{name}" value="${@{value}}"/>
</sequential>
</macrodef>
</target>
<target name="-init-macrodef-javac">
<macrodef name="javac" uri="http://www.netbeans.org/ns/j2se-project/3">
<attribute default="${src.dir}" name="srcdir"/>
<attribute default="${build.classes.dir}" name="destdir"/>
<attribute default="${javac.classpath}" name="classpath"/>
<attribute default="${includes}" name="includes"/>
<attribute default="${excludes}" name="excludes"/>
<attribute default="${javac.debug}" name="debug"/>
<attribute default="${empty.dir}" name="sourcepath"/>
<attribute default="${empty.dir}" name="gensrcdir"/>
<element name="customize" optional="true"/>
<sequential>
<property location="${build.dir}/empty" name="empty.dir"/>
<mkdir dir="${empty.dir}"/>
<mkdir dir="@{gensrcdir}"/>
<javac debug="@{debug}" deprecation="${javac.deprecation}" destdir="@{destdir}" encoding="${source.encoding}" excludes="@{excludes}" includeantruntime="false" includes="@{includes}" source="${javac.source}" sourcepath="@{sourcepath}" srcdir="@{srcdir}" target="${javac.target}">
<src>
<dirset dir="@{gensrcdir}">
<include name="*"/>
</dirset>
</src>
<classpath>
<path path="@{classpath}"/>
</classpath>
<compilerarg line="${javac.compilerargs} ${javac.compilerargs.jaxws}"/>
<customize/>
</javac>
</sequential>
</macrodef>
<macrodef name="depend" uri="http://www.netbeans.org/ns/j2se-project/3">
<attribute default="${src.dir}" name="srcdir"/>
<attribute default="${build.classes.dir}" name="destdir"/>
<attribute default="${javac.classpath}" name="classpath"/>
<sequential>
<depend cache="${build.dir}/depcache" destdir="@{destdir}" excludes="${excludes}" includes="${includes}" srcdir="@{srcdir}">
<classpath>
<path path="@{classpath}"/>
</classpath>
</depend>
</sequential>
</macrodef>
<macrodef name="force-recompile" uri="http://www.netbeans.org/ns/j2se-project/3">
<attribute default="${build.classes.dir}" name="destdir"/>
<sequential>
<fail unless="javac.includes">Must set javac.includes</fail>
<pathconvert pathsep="," property="javac.includes.binary">
<path>
<filelist dir="@{destdir}" files="${javac.includes}"/>
</path>
<globmapper from="*.java" to="*.class"/>
</pathconvert>
<delete>
<files includes="${javac.includes.binary}"/>
</delete>
</sequential>
</macrodef>
</target>
<target name="-init-macrodef-junit">
<macrodef name="junit" uri="http://www.netbeans.org/ns/j2se-project/3">
<attribute default="${includes}" name="includes"/>
<attribute default="${excludes}" name="excludes"/>
<attribute default="**" name="testincludes"/>
<sequential>
<junit dir="${work.dir}" errorproperty="tests.failed" failureproperty="tests.failed" fork="true" showoutput="true">
<batchtest todir="${build.test.results.dir}">
<fileset dir="${test.src.dir}" excludes="@{excludes},${excludes}" includes="@{includes}">
<filename name="@{testincludes}"/>
</fileset>
</batchtest>
<classpath>
<path path="${run.test.classpath}"/>
</classpath>
<syspropertyset>
<propertyref prefix="test-sys-prop."/>
<mapper from="test-sys-prop.*" to="*" type="glob"/>
</syspropertyset>
<formatter type="brief" usefile="false"/>
<formatter type="xml"/>
<jvmarg line="${run.jvmargs}"/>
</junit>
</sequential>
</macrodef>
</target>
<target name="-init-macrodef-nbjpda">
<macrodef name="nbjpdastart" uri="http://www.netbeans.org/ns/j2se-project/1">
<attribute default="${main.class}" name="name"/>
<attribute default="${debug.classpath}" name="classpath"/>
<attribute default="" name="stopclassname"/>
<sequential>
<nbjpdastart addressproperty="jpda.address" name="@{name}" stopclassname="@{stopclassname}" transport="dt_socket">
<classpath>
<path path="@{classpath}"/>
</classpath>
</nbjpdastart>
</sequential>
</macrodef>
<macrodef name="nbjpdareload" uri="http://www.netbeans.org/ns/j2se-project/1">
<attribute default="${build.classes.dir}" name="dir"/>
<sequential>
<nbjpdareload>
<fileset dir="@{dir}" includes="${fix.classes}">
<include name="${fix.includes}*.class"/>
</fileset>
</nbjpdareload>
</sequential>
</macrodef>
</target>
<target name="-init-debug-args">
<property name="version-output" value="java version &quot;${ant.java.version}"/>
<condition property="have-jdk-older-than-1.4">
<or>
<contains string="${version-output}" substring="java version &quot;1.0"/>
<contains string="${version-output}" substring="java version &quot;1.1"/>
<contains string="${version-output}" substring="java version &quot;1.2"/>
<contains string="${version-output}" substring="java version &quot;1.3"/>
</or>
</condition>
<condition else="-Xdebug" property="debug-args-line" value="-Xdebug -Xnoagent -Djava.compiler=none">
<istrue value="${have-jdk-older-than-1.4}"/>
</condition>
</target>
<target depends="-init-debug-args" name="-init-macrodef-debug">
<macrodef name="debug" uri="http://www.netbeans.org/ns/j2se-project/3">
<attribute default="${main.class}" name="classname"/>
<attribute default="${debug.classpath}" name="classpath"/>
<element name="customize" optional="true"/>
<sequential>
<java classname="@{classname}" dir="${work.dir}" fork="true">
<jvmarg line="${debug-args-line}"/>
<jvmarg value="-Xrunjdwp:transport=dt_socket,address=${jpda.address}"/>
<jvmarg value="-Dfile.encoding=${source.encoding}"/>
<redirector errorencoding="${source.encoding}" inputencoding="${source.encoding}" outputencoding="${source.encoding}"/>
<jvmarg line="${run.jvmargs}"/>
<classpath>
<path path="@{classpath}"/>
</classpath>
<syspropertyset>
<propertyref prefix="run-sys-prop."/>
<mapper from="run-sys-prop.*" to="*" type="glob"/>
</syspropertyset>
<customize/>
</java>
</sequential>
</macrodef>
</target>
<target name="-init-macrodef-java">
<macrodef name="java" uri="http://www.netbeans.org/ns/j2se-project/1">
<attribute default="${main.class}" name="classname"/>
<attribute default="${run.classpath}" name="classpath"/>
<element name="customize" optional="true"/>
<sequential>
<java classname="@{classname}" dir="${work.dir}" fork="true">
<jvmarg value="-Dfile.encoding=${source.encoding}"/>
<redirector errorencoding="${source.encoding}" inputencoding="${source.encoding}" outputencoding="${source.encoding}"/>
<jvmarg line="${run.jvmargs}"/>
<classpath>
<path path="@{classpath}"/>
</classpath>
<syspropertyset>
<propertyref prefix="run-sys-prop."/>
<mapper from="run-sys-prop.*" to="*" type="glob"/>
</syspropertyset>
<customize/>
</java>
</sequential>
</macrodef>
</target>
<target name="-init-presetdef-jar">
<presetdef name="jar" uri="http://www.netbeans.org/ns/j2se-project/1">
<jar compress="${jar.compress}" jarfile="${dist.jar}">
<j2seproject1:fileset dir="${build.classes.dir}"/>
</jar>
</presetdef>
</target>
<target depends="-pre-init,-init-private,-init-user,-init-project,-do-init,-post-init,-init-check,-init-macrodef-property,-init-macrodef-javac,-init-macrodef-junit,-init-macrodef-nbjpda,-init-macrodef-debug,-init-macrodef-java,-init-presetdef-jar" name="init"/>
<!--
===================
COMPILATION SECTION
===================
-->
<target depends="init" name="deps-jar" unless="no.deps"/>
<target depends="init,deps-jar" name="-pre-pre-compile">
<mkdir dir="${build.classes.dir}"/>
</target>
<target name="-pre-compile">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target if="do.depend.true" name="-compile-depend">
<pathconvert property="build.generated.subdirs">
<dirset dir="${build.generated.sources.dir}" erroronmissingdir="false">
<include name="*"/>
</dirset>
</pathconvert>
<j2seproject3:depend srcdir="${src.dir}:${build.generated.subdirs}"/>
</target>
<target depends="init,deps-jar,-pre-pre-compile,-pre-compile,-compile-depend" if="have.sources" name="-do-compile">
<j2seproject3:javac gensrcdir="${build.generated.sources.dir}"/>
<copy todir="${build.classes.dir}">
<fileset dir="${src.dir}" excludes="${build.classes.excludes},${excludes}" includes="${includes}"/>
</copy>
</target>
<target name="-post-compile">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,deps-jar,-pre-pre-compile,-pre-compile,-do-compile,-post-compile" description="Compile project." name="compile"/>
<target name="-pre-compile-single">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,deps-jar,-pre-pre-compile" name="-do-compile-single">
<fail unless="javac.includes">Must select some files in the IDE or set javac.includes</fail>
<j2seproject3:force-recompile/>
<j2seproject3:javac excludes="" gensrcdir="${build.generated.sources.dir}" includes="${javac.includes}" sourcepath="${src.dir}"/>
</target>
<target name="-post-compile-single">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,deps-jar,-pre-pre-compile,-pre-compile-single,-do-compile-single,-post-compile-single" name="compile-single"/>
<!--
====================
JAR BUILDING SECTION
====================
-->
<target depends="init" name="-pre-pre-jar">
<dirname file="${dist.jar}" property="dist.jar.dir"/>
<mkdir dir="${dist.jar.dir}"/>
</target>
<target name="-pre-jar">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,compile,-pre-pre-jar,-pre-jar" name="-do-jar-without-manifest" unless="manifest.available">
<j2seproject1:jar/>
</target>
<target depends="init,compile,-pre-pre-jar,-pre-jar" if="manifest.available" name="-do-jar-with-manifest" unless="manifest.available+main.class">
<j2seproject1:jar manifest="${manifest.file}"/>
</target>
<target depends="init,compile,-pre-pre-jar,-pre-jar" if="manifest.available+main.class" name="-do-jar-with-mainclass" unless="manifest.available+main.class+mkdist.available">
<j2seproject1:jar manifest="${manifest.file}">
<j2seproject1:manifest>
<j2seproject1:attribute name="Main-Class" value="${main.class}"/>
</j2seproject1:manifest>
</j2seproject1:jar>
<echo>To run this application from the command line without Ant, try:</echo>
<property location="${build.classes.dir}" name="build.classes.dir.resolved"/>
<property location="${dist.jar}" name="dist.jar.resolved"/>
<pathconvert property="run.classpath.with.dist.jar">
<path path="${run.classpath}"/>
<map from="${build.classes.dir.resolved}" to="${dist.jar.resolved}"/>
</pathconvert>
<echo>java -cp "${run.classpath.with.dist.jar}" ${main.class}</echo>
</target>
<target depends="init,compile,-pre-pre-jar,-pre-jar" if="manifest.available+main.class+mkdist.available" name="-do-jar-with-libraries">
<property location="${build.classes.dir}" name="build.classes.dir.resolved"/>
<pathconvert property="run.classpath.without.build.classes.dir">
<path path="${run.classpath}"/>
<map from="${build.classes.dir.resolved}" to=""/>
</pathconvert>
<pathconvert pathsep=" " property="jar.classpath">
<path path="${run.classpath.without.build.classes.dir}"/>
<chainedmapper>
<flattenmapper/>
<globmapper from="*" to="lib/*"/>
</chainedmapper>
</pathconvert>
<taskdef classname="org.netbeans.modules.java.j2seproject.copylibstask.CopyLibs" classpath="${libs.CopyLibs.classpath}" name="copylibs"/>
<copylibs compress="${jar.compress}" jarfile="${dist.jar}" manifest="${manifest.file}" runtimeclasspath="${run.classpath.without.build.classes.dir}">
<fileset dir="${build.classes.dir}"/>
<manifest>
<attribute name="Main-Class" value="${main.class}"/>
<attribute name="Class-Path" value="${jar.classpath}"/>
</manifest>
</copylibs>
<echo>To run this application from the command line without Ant, try:</echo>
<property location="${dist.jar}" name="dist.jar.resolved"/>
<echo>java -jar "${dist.jar.resolved}"</echo>
</target>
<target depends="init,compile,-pre-pre-jar,-pre-jar" if="libs.CopyLibs.classpath" name="-do-jar-with-libraries-without-manifest" unless="manifest.available+main.class">
<property location="${build.classes.dir}" name="build.classes.dir.resolved"/>
<pathconvert property="run.classpath.without.build.classes.dir">
<path path="${run.classpath}"/>
<map from="${build.classes.dir.resolved}" to=""/>
</pathconvert>
<pathconvert pathsep=" " property="jar.classpath">
<path path="${run.classpath.without.build.classes.dir}"/>
<chainedmapper>
<flattenmapper/>
<globmapper from="*" to="lib/*"/>
</chainedmapper>
</pathconvert>
<taskdef classname="org.netbeans.modules.java.j2seproject.copylibstask.CopyLibs" classpath="${libs.CopyLibs.classpath}" name="copylibs"/>
<copylibs compress="${jar.compress}" jarfile="${dist.jar}" runtimeclasspath="${run.classpath.without.build.classes.dir}">
<fileset dir="${build.classes.dir}"/>
</copylibs>
</target>
<target name="-post-jar">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,compile,-pre-jar,-do-jar-with-manifest,-do-jar-without-manifest,-do-jar-with-mainclass,-do-jar-with-libraries,-do-jar-with-libraries-without-manifest,-post-jar" description="Build JAR." name="jar"/>
<!--
=================
EXECUTION SECTION
=================
-->
<target depends="init,compile" description="Run a main class." name="run">
<j2seproject1:java>
<customize>
<arg line="${application.args}"/>
</customize>
</j2seproject1:java>
</target>
<target name="-do-not-recompile">
<property name="javac.includes.binary" value=""/>
</target>
<target depends="init,-do-not-recompile,compile-single" name="run-single">
<fail unless="run.class">Must select one file in the IDE or set run.class</fail>
<j2seproject1:java classname="${run.class}"/>
</target>
<target depends="init,-do-not-recompile,compile-test-single" name="run-test-with-main">
<fail unless="run.class">Must select one file in the IDE or set run.class</fail>
<j2seproject1:java classname="${run.class}" classpath="${run.test.classpath}"/>
</target>
<!--
=================
DEBUGGING SECTION
=================
-->
<target depends="init" if="netbeans.home" name="-debug-start-debugger">
<j2seproject1:nbjpdastart name="${debug.class}"/>
</target>
<target depends="init" if="netbeans.home" name="-debug-start-debugger-main-test">
<j2seproject1:nbjpdastart classpath="${debug.test.classpath}" name="${debug.class}"/>
</target>
<target depends="init,compile" name="-debug-start-debuggee">
<j2seproject3:debug>
<customize>
<arg line="${application.args}"/>
</customize>
</j2seproject3:debug>
</target>
<target depends="init,compile,-debug-start-debugger,-debug-start-debuggee" description="Debug project in IDE." if="netbeans.home" name="debug"/>
<target depends="init" if="netbeans.home" name="-debug-start-debugger-stepinto">
<j2seproject1:nbjpdastart stopclassname="${main.class}"/>
</target>
<target depends="init,compile,-debug-start-debugger-stepinto,-debug-start-debuggee" if="netbeans.home" name="debug-stepinto"/>
<target depends="init,compile-single" if="netbeans.home" name="-debug-start-debuggee-single">
<fail unless="debug.class">Must select one file in the IDE or set debug.class</fail>
<j2seproject3:debug classname="${debug.class}"/>
</target>
<target depends="init,-do-not-recompile,compile-single,-debug-start-debugger,-debug-start-debuggee-single" if="netbeans.home" name="debug-single"/>
<target depends="init,compile-test-single" if="netbeans.home" name="-debug-start-debuggee-main-test">
<fail unless="debug.class">Must select one file in the IDE or set debug.class</fail>
<j2seproject3:debug classname="${debug.class}" classpath="${debug.test.classpath}"/>
</target>
<target depends="init,-do-not-recompile,compile-test-single,-debug-start-debugger-main-test,-debug-start-debuggee-main-test" if="netbeans.home" name="debug-test-with-main"/>
<target depends="init" name="-pre-debug-fix">
<fail unless="fix.includes">Must set fix.includes</fail>
<property name="javac.includes" value="${fix.includes}.java"/>
</target>
<target depends="init,-pre-debug-fix,compile-single" if="netbeans.home" name="-do-debug-fix">
<j2seproject1:nbjpdareload/>
</target>
<target depends="init,-pre-debug-fix,-do-debug-fix" if="netbeans.home" name="debug-fix"/>
<!--
===============
JAVADOC SECTION
===============
-->
<target depends="init" name="-javadoc-build">
<mkdir dir="${dist.javadoc.dir}"/>
<javadoc additionalparam="${javadoc.additionalparam}" author="${javadoc.author}" charset="UTF-8" destdir="${dist.javadoc.dir}" docencoding="UTF-8" encoding="${javadoc.encoding.used}" failonerror="true" noindex="${javadoc.noindex}" nonavbar="${javadoc.nonavbar}" notree="${javadoc.notree}" private="${javadoc.private}" source="${javac.source}" splitindex="${javadoc.splitindex}" use="${javadoc.use}" useexternalfile="true" version="${javadoc.version}" windowtitle="${javadoc.windowtitle}">
<classpath>
<path path="${javac.classpath}"/>
</classpath>
<fileset dir="${src.dir}" excludes="${excludes}" includes="${includes}">
<filename name="**/*.java"/>
</fileset>
<fileset dir="${build.generated.sources.dir}" erroronmissingdir="false">
<include name="**/*.java"/>
</fileset>
</javadoc>
</target>
<target depends="init,-javadoc-build" if="netbeans.home" name="-javadoc-browse" unless="no.javadoc.preview">
<nbbrowse file="${dist.javadoc.dir}/index.html"/>
</target>
<target depends="init,-javadoc-build,-javadoc-browse" description="Build Javadoc." name="javadoc"/>
<!--
=========================
JUNIT COMPILATION SECTION
=========================
-->
<target depends="init,compile" if="have.tests" name="-pre-pre-compile-test">
<mkdir dir="${build.test.classes.dir}"/>
</target>
<target name="-pre-compile-test">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target if="do.depend.true" name="-compile-test-depend">
<j2seproject3:depend classpath="${javac.test.classpath}" destdir="${build.test.classes.dir}" srcdir="${test.src.dir}"/>
</target>
<target depends="init,compile,-pre-pre-compile-test,-pre-compile-test,-compile-test-depend" if="have.tests" name="-do-compile-test">
<j2seproject3:javac classpath="${javac.test.classpath}" debug="true" destdir="${build.test.classes.dir}" srcdir="${test.src.dir}"/>
<copy todir="${build.test.classes.dir}">
<fileset dir="${test.src.dir}" excludes="${build.classes.excludes},${excludes}" includes="${includes}"/>
</copy>
</target>
<target name="-post-compile-test">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,compile,-pre-pre-compile-test,-pre-compile-test,-do-compile-test,-post-compile-test" name="compile-test"/>
<target name="-pre-compile-test-single">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,compile,-pre-pre-compile-test,-pre-compile-test-single" if="have.tests" name="-do-compile-test-single">
<fail unless="javac.includes">Must select some files in the IDE or set javac.includes</fail>
<j2seproject3:force-recompile destdir="${build.test.classes.dir}"/>
<j2seproject3:javac classpath="${javac.test.classpath}" debug="true" destdir="${build.test.classes.dir}" excludes="" includes="${javac.includes}" sourcepath="${test.src.dir}" srcdir="${test.src.dir}"/>
<copy todir="${build.test.classes.dir}">
<fileset dir="${test.src.dir}" excludes="${build.classes.excludes},${excludes}" includes="${includes}"/>
</copy>
</target>
<target name="-post-compile-test-single">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,compile,-pre-pre-compile-test,-pre-compile-test-single,-do-compile-test-single,-post-compile-test-single" name="compile-test-single"/>
<!--
=======================
JUNIT EXECUTION SECTION
=======================
-->
<target depends="init" if="have.tests" name="-pre-test-run">
<mkdir dir="${build.test.results.dir}"/>
</target>
<target depends="init,compile-test,-pre-test-run" if="have.tests" name="-do-test-run">
<j2seproject3:junit testincludes="**/*Test.java"/>
</target>
<target depends="init,compile-test,-pre-test-run,-do-test-run" if="have.tests" name="-post-test-run">
<fail if="tests.failed" unless="ignore.failing.tests">Some tests failed; see details above.</fail>
</target>
<target depends="init" if="have.tests" name="test-report"/>
<target depends="init" if="netbeans.home+have.tests" name="-test-browse"/>
<target depends="init,compile-test,-pre-test-run,-do-test-run,test-report,-post-test-run,-test-browse" description="Run unit tests." name="test"/>
<target depends="init" if="have.tests" name="-pre-test-run-single">
<mkdir dir="${build.test.results.dir}"/>
</target>
<target depends="init,compile-test-single,-pre-test-run-single" if="have.tests" name="-do-test-run-single">
<fail unless="test.includes">Must select some files in the IDE or set test.includes</fail>
<j2seproject3:junit excludes="" includes="${test.includes}"/>
</target>
<target depends="init,compile-test-single,-pre-test-run-single,-do-test-run-single" if="have.tests" name="-post-test-run-single">
<fail if="tests.failed" unless="ignore.failing.tests">Some tests failed; see details above.</fail>
</target>
<target depends="init,-do-not-recompile,compile-test-single,-pre-test-run-single,-do-test-run-single,-post-test-run-single" description="Run single unit test." name="test-single"/>
<!--
=======================
JUNIT DEBUGGING SECTION
=======================
-->
<target depends="init,compile-test" if="have.tests" name="-debug-start-debuggee-test">
<fail unless="test.class">Must select one file in the IDE or set test.class</fail>
<property location="${build.test.results.dir}/TEST-${test.class}.xml" name="test.report.file"/>
<delete file="${test.report.file}"/>
<mkdir dir="${build.test.results.dir}"/>
<j2seproject3:debug classname="org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner" classpath="${ant.home}/lib/ant.jar:${ant.home}/lib/ant-junit.jar:${debug.test.classpath}">
<customize>
<syspropertyset>
<propertyref prefix="test-sys-prop."/>
<mapper from="test-sys-prop.*" to="*" type="glob"/>
</syspropertyset>
<arg value="${test.class}"/>
<arg value="showoutput=true"/>
<arg value="formatter=org.apache.tools.ant.taskdefs.optional.junit.BriefJUnitResultFormatter"/>
<arg value="formatter=org.apache.tools.ant.taskdefs.optional.junit.XMLJUnitResultFormatter,${test.report.file}"/>
</customize>
</j2seproject3:debug>
</target>
<target depends="init,compile-test" if="netbeans.home+have.tests" name="-debug-start-debugger-test">
<j2seproject1:nbjpdastart classpath="${debug.test.classpath}" name="${test.class}"/>
</target>
<target depends="init,-do-not-recompile,compile-test-single,-debug-start-debugger-test,-debug-start-debuggee-test" name="debug-test"/>
<target depends="init,-pre-debug-fix,compile-test-single" if="netbeans.home" name="-do-debug-fix-test">
<j2seproject1:nbjpdareload dir="${build.test.classes.dir}"/>
</target>
<target depends="init,-pre-debug-fix,-do-debug-fix-test" if="netbeans.home" name="debug-fix-test"/>
<!--
=========================
APPLET EXECUTION SECTION
=========================
-->
<target depends="init,compile-single" name="run-applet">
<fail unless="applet.url">Must select one file in the IDE or set applet.url</fail>
<j2seproject1:java classname="sun.applet.AppletViewer">
<customize>
<arg value="${applet.url}"/>
</customize>
</j2seproject1:java>
</target>
<!--
=========================
APPLET DEBUGGING SECTION
=========================
-->
<target depends="init,compile-single" if="netbeans.home" name="-debug-start-debuggee-applet">
<fail unless="applet.url">Must select one file in the IDE or set applet.url</fail>
<j2seproject3:debug classname="sun.applet.AppletViewer">
<customize>
<arg value="${applet.url}"/>
</customize>
</j2seproject3:debug>
</target>
<target depends="init,compile-single,-debug-start-debugger,-debug-start-debuggee-applet" if="netbeans.home" name="debug-applet"/>
<!--
===============
CLEANUP SECTION
===============
-->
<target depends="init" name="deps-clean" unless="no.deps"/>
<target depends="init" name="-do-clean">
<delete dir="${build.dir}"/>
<delete dir="${dist.dir}"/>
</target>
<target name="-post-clean">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target depends="init,deps-clean,-do-clean,-post-clean" description="Clean build products." name="clean"/>
</project>

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,11 @@
build.xml.data.CRC32=4a9eff70
build.xml.script.CRC32=ce2ddeb0
build.xml.stylesheet.CRC32=958a1d3e
# This file is used by a NetBeans-based IDE to track changes in generated files such as build-impl.xml.
# Do not edit this file. You may delete it but then the IDE will never regenerate such files for you.
nbproject/build-impl.xml.data.CRC32=53818f37
nbproject/build-impl.xml.script.CRC32=42a9c2ae
nbproject/build-impl.xml.stylesheet.CRC32=0ae3a408@1.44.1.45
nbproject/profiler-build-impl.xml.data.CRC32=4a9eff70
nbproject/profiler-build-impl.xml.script.CRC32=abda56ed
nbproject/profiler-build-impl.xml.stylesheet.CRC32=42cb6bcf

View File

@@ -0,0 +1,131 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
*** GENERATED FROM project.xml - DO NOT EDIT ***
*** EDIT ../build.xml INSTEAD ***
For the purpose of easier reading the script
is divided into following sections:
- initialization
- profiling
- applet profiling
-->
<project name="-profiler-impl" default="profile" basedir="..">
<target name="default" depends="profile" description="Build and profile the project."/>
<!--
======================
INITIALIZATION SECTION
======================
-->
<target name="profile-init" depends="-profile-pre-init, init, -profile-post-init, -profile-init-macrodef-profile, -profile-init-check"/>
<target name="-profile-pre-init">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target name="-profile-post-init">
<!-- Empty placeholder for easier customization. -->
<!-- You can override this target in the ../build.xml file. -->
</target>
<target name="-profile-init-macrodef-profile">
<macrodef name="resolve">
<attribute name="name"/>
<attribute name="value"/>
<sequential>
<property name="@{name}" value="${env.@{value}}"/>
</sequential>
</macrodef>
<macrodef name="profile">
<attribute name="classname" default="${main.class}"/>
<element name="customize" optional="true"/>
<sequential>
<property environment="env"/>
<resolve name="profiler.current.path" value="${profiler.info.pathvar}"/>
<java fork="true" classname="@{classname}" dir="${profiler.info.dir}" jvm="${profiler.info.jvm}">
<jvmarg value="${profiler.info.jvmargs.agent}"/>
<jvmarg line="${profiler.info.jvmargs}"/>
<env key="${profiler.info.pathvar}" path="${profiler.info.agentpath}:${profiler.current.path}"/>
<arg line="${application.args}"/>
<classpath>
<path path="${run.classpath}"/>
</classpath>
<syspropertyset>
<propertyref prefix="run-sys-prop."/>
<mapper type="glob" from="run-sys-prop.*" to="*"/>
</syspropertyset>
<customize/>
</java>
</sequential>
</macrodef>
</target>
<target name="-profile-init-check" depends="-profile-pre-init, init, -profile-post-init, -profile-init-macrodef-profile">
<fail unless="profiler.info.jvm">Must set JVM to use for profiling in profiler.info.jvm</fail>
<fail unless="profiler.info.jvmargs.agent">Must set profiler agent JVM arguments in profiler.info.jvmargs.agent</fail>
</target>
<!--
=================
PROFILING SECTION
=================
-->
<target name="profile" if="netbeans.home" depends="profile-init,compile" description="Profile a project in the IDE.">
<nbprofiledirect>
<classpath>
<path path="${run.classpath}"/>
</classpath>
</nbprofiledirect>
<profile/>
</target>
<target name="profile-single" if="netbeans.home" depends="profile-init,compile-single" description="Profile a selected class in the IDE.">
<fail unless="profile.class">Must select one file in the IDE or set profile.class</fail>
<nbprofiledirect>
<classpath>
<path path="${run.classpath}"/>
</classpath>
</nbprofiledirect>
<profile classname="${profile.class}"/>
</target>
<!--
=========================
APPLET PROFILING SECTION
=========================
-->
<target name="profile-applet" if="netbeans.home" depends="profile-init,compile-single">
<nbprofiledirect>
<classpath>
<path path="${run.classpath}"/>
</classpath>
</nbprofiledirect>
<profile classname="sun.applet.AppletViewer">
<customize>
<arg value="${applet.url}"/>
</customize>
</profile>
</target>
<!--
=========================
TESTS PROFILING SECTION
=========================
-->
<target name="profile-test-single" if="netbeans.home" depends="profile-init,compile-test-single">
<nbprofiledirect>
<classpath>
<path path="${run.test.classpath}"/>
</classpath>
</nbprofiledirect>
<junit showoutput="true" fork="true" dir="${profiler.info.dir}" jvm="${profiler.info.jvm}" failureproperty="tests.failed" errorproperty="tests.failed">
<env key="${profiler.info.pathvar}" path="${profiler.info.agentpath}:${profiler.current.path}"/>
<jvmarg value="${profiler.info.jvmargs.agent}"/>
<jvmarg line="${profiler.info.jvmargs}"/>
<test name="${profile.class}"/>
<classpath>
<path path="${run.test.classpath}"/>
</classpath>
<syspropertyset>
<propertyref prefix="test-sys-prop."/>
<mapper type="glob" from="test-sys-prop.*" to="*"/>
</syspropertyset>
<formatter type="brief" usefile="false"/>
<formatter type="xml"/>
</junit>
</target>
</project>

View File

@@ -0,0 +1,101 @@
annotation.processing.enabled=true
annotation.processing.enabled.in.editor=false
annotation.processing.run.all.processors=true
application.args=
application.title=XtreemFS
application.vendor=bjko
build.classes.dir=${build.dir}/classes
build.classes.excludes=**/*.java,**/*.form
# This directory is removed when the project is cleaned:
build.dir=build
build.generated.dir=${build.dir}/generated
build.generated.sources.dir=${build.dir}/generated-sources
# Only compile against the classpath explicitly listed here:
build.sysclasspath=ignore
build.test.classes.dir=${build.dir}/test/classes
build.test.results.dir=${build.dir}/test/results
debug.classpath=\
${run.classpath}
debug.test.classpath=\
${run.test.classpath}
# This directory is removed when the project is cleaned:
dist.dir=dist
dist.jar=${dist.dir}/XtreemFS.jar
dist.javadoc.dir=${dist.dir}/javadoc
endorsed.classpath=
excludes=
file.reference.BabuDB.jar-1=../lib/BabuDB.jar
file.reference.bcprov-jdk16-139.jar=lib/bcprov-jdk16-139.jar
file.reference.cdaclient.jar=lib/cdaclient.jar
file.reference.junit-4.11.jar=../lib/test/junit-4.11.jar
file.reference.config.jar=lib/config.jar
file.reference.je-3.2.13.jar=lib/je-3.2.13.jar
file.reference.protobuf-java-2.5.0.jar=../lib/protobuf-java-2.5.0.jar
file.reference.xbean.jar=lib/xbean.jar
file.reference.jdmkrt.jar=../lib/jdmkrt.jar
file.reference.jdmktk.jar=../lib/jdmktk.jar
includes=**
jar.archive.disabled=${jnlp.enabled}
jar.compress=false
jar.index=${jnlp.enabled}
javac.classpath=\
${file.reference.BabuDB.jar-1}:\
${file.reference.protobuf-java-2.5.0.jar}:\
${reference.XtreemFS-foundation.jar}:\
${reference.Flease.jar}:\
${file.reference.junit-4.11.jar}:\
${file.reference.jdmkrt.jar}:\
${file.reference.jdmktk.jar}
# Space-separated list of extra javac options
javac.compilerargs=
javac.deprecation=false
javac.processorpath=\
${javac.classpath}
javac.source=1.6
javac.target=1.6
javac.test.classpath=\
${javac.classpath}:\
${build.classes.dir}:\
${libs.junit_4.classpath}
javadoc.additionalparam=
javadoc.author=false
javadoc.encoding=
javadoc.noindex=false
javadoc.nonavbar=false
javadoc.notree=false
javadoc.private=false
javadoc.splitindex=true
javadoc.use=true
javadoc.version=false
javadoc.windowtitle=
jnlp.codebase.type=local
jnlp.codebase.url=file:/home/bjko/xtreemos/xtreemfs/googlecode/servers/dist
jnlp.descriptor=application
jnlp.enabled=false
jnlp.mixed.code=default
jnlp.offline-allowed=false
jnlp.signed=false
jnlp.signing=
jnlp.signing.alias=
jnlp.signing.keystore=
main.class=org.xtreemfs.mrc.MRC
manifest.file=manifest.mf
meta.inf.dir=${src.dir}/META-INF
mkdist.disabled=false
platform.active=default_platform
project.Flease=../flease
project.XtreemFS-foundation=../foundation
reference.Flease.jar=${project.Flease}/dist/Flease.jar
reference.XtreemFS-foundation.jar=${project.XtreemFS-foundation}/dist/Foundation.jar
run.classpath=\
${javac.classpath}:\
${build.classes.dir}
# Space-separated list of JVM arguments used when running the project
# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
# or test-sys-prop.name=value to set system properties for unit tests):
run.jvmargs=-ea
run.test.classpath=\
${javac.test.classpath}:\
${build.test.classes.dir}
src.dir=src
test.src.dir=test

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?><project xmlns="http://www.netbeans.org/ns/project/1">
<type>org.netbeans.modules.java.j2seproject</type>
<configuration>
<data xmlns="http://www.netbeans.org/ns/j2se-project/3">
<name>XtreemFS</name>
<minimum-ant-version>1.6.5</minimum-ant-version>
<source-roots>
<root id="src.dir"/>
</source-roots>
<test-roots>
<root id="test.src.dir"/>
</test-roots>
</data>
<references xmlns="http://www.netbeans.org/ns/ant-project-references/1">
<reference>
<foreign-project>Flease</foreign-project>
<artifact-type>jar</artifact-type>
<script>build.xml</script>
<target>jar</target>
<clean-target>clean</clean-target>
<id>jar</id>
</reference>
<reference>
<foreign-project>XtreemFS-foundation</foreign-project>
<artifact-type>jar</artifact-type>
<script>build.xml</script>
<target>jar</target>
<clean-target>clean</clean-target>
<id>jar</id>
</reference>
</references>
</configuration>
</project>

View File

@@ -0,0 +1,201 @@
/*
* Copyright (c) 2009 by Jan Stender, Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.xtreemfs.foundation.TimeSync;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.util.OutputUtils;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.SnapConfig;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
/**
* This class implements a Java representation of a capability.
*
* In general, a capability can be seen as a token granting the permission to
* carry out an operation on a remote server.
*
 * When a client wants to open a file, the MRC checks whether the respective kind
* of access is granted. If so, the MRC sends a capability to the client, which
* in turn sends the capability to the OSD when file contents are accessed or
* modified. The OSD has to check whether the capability is valid. A capability
* is valid as long as it has a correct signature and has not expired yet.
* Capabilities can be renewed in order to extend their validity.
*
* Each capability contains a file ID, a string representing the access mode, an
* expiration time stamp representing the time in seconds from 1/1/1970, a
* string containing data that can be used to verify the client identity, as
* well as a signature added by the MRC.
*
*
* @author stender
*
*/
public class Capability {

    // The wrapped protobuf capability message (file ID, access mode, expiry,
    // truncate epoch, snapshot info and the server signature).
    private XCap xcap;

    // Shared secret between MRC and OSDs, mixed into the signature so that
    // only services knowing the secret can create or verify capabilities.
    private final String sharedSecret;

    /**
     * Creates a capability from a given set of data. A signature will be added
     * automatically. This constructor is meant to initially create a capability
     * at the MRC.
     *
     * @param fileId
     *            the file ID
     * @param accessMode
     *            the access mode
     * @param validity
     *            the relative validity time span in seconds
     * @param expires
     *            the absolute expiration time stamp (seconds since 1970)
     * @param clientIdentity
     *            data that can be used to verify the client's identity
     * @param epochNo
     *            the epoch number associated with the capability; epoch numbers
     *            are incremented each time the file is truncated or deleted
     * @param replicateOnClose
     *            flag stored in the XCap (presumably: replicate the file when
     *            it is closed -- confirm against OSD usage)
     * @param snapConfig
     *            the snapshot configuration recorded in the capability
     * @param snapTimestamp
     *            the snapshot timestamp recorded in the capability
     * @param sharedSecret
     *            the shared secret to be used to sign the capability
     */
    public Capability(String fileId, int accessMode, int validity, long expires, String clientIdentity,
        int epochNo, boolean replicateOnClose, SnapConfig snapConfig, long snapTimestamp, String sharedSecret) {
        this.sharedSecret = sharedSecret;
        // Build the unsigned XCap first; the signature is computed over a
        // subset of its fields (see calcSignature) and then attached.
        XCap.Builder builder = XCap.newBuilder().setAccessMode(accessMode).setClientIdentity(clientIdentity).setExpireTimeS(expires).setExpireTimeoutS(validity).
            setFileId(fileId).setReplicateOnClose(replicateOnClose).setTruncateEpoch(epochNo).setSnapConfig(snapConfig).setSnapTimestamp(snapTimestamp);
        final String sig = calcSignature(builder);
        builder.setServerSignature(sig);
        xcap = builder.build();
    }

    /**
     * Wrapper for XCap objects.
     *
     * @param xcap
     *            the parsed XCap object
     * @param sharedSecret
     *            the shared secret (from configuration file)
     */
    public Capability(XCap xcap, String sharedSecret) {
        this.xcap = xcap;
        this.sharedSecret = sharedSecret;
    }

    /** @return the underlying protobuf XCap message. */
    public XCap getXCap() {
        return this.xcap;
    }

    /** @return the ID of the file this capability grants access to. */
    public String getFileId() {
        return xcap.getFileId();
    }

    /** @return the access mode granted by this capability. */
    public int getAccessMode() {
        return xcap.getAccessMode();
    }

    /**
     * returns the absolute time, when the capability expires (in seconds)
     *
     * @return expiration time stamp in seconds since 1970
     */
    public long getExpires() {
        return xcap.getExpireTimeS();
    }

    /** @return the client identity string recorded in the capability. */
    public String getClientIdentity() {
        return xcap.getClientIdentity();
    }

    /** @return the truncate epoch number recorded in the capability. */
    public int getEpochNo() {
        return xcap.getTruncateEpoch();
    }

    /** @return the server signature attached to the capability. */
    public String getSignature() {
        return xcap.getServerSignature();
    }

    /**
     * Checks whether the capability is valid.
     *
     * @return <code>true</code>, if it hasn't expired yet and the signature is
     *         valid, <code>false</code>, otherwise
     */
    public boolean isValid() {
        return !hasExpired() && hasValidSignature();
    }

    /**
     * Checks whether the capability has expired.
     *
     * @return <code>true</code>, if the current system time is after the
     *         expiration time stamp <code>false</code>, otherwise
     */
    public boolean hasExpired() {
        // getGlobalTime() is in milliseconds; XCap stores seconds.
        return TimeSync.getGlobalTime() / 1000 > xcap.getExpireTimeS();
    }

    /**
     * Checks whether the capability has a valid signature.
     *
     * @return <code>true</code>, if the signature is valid, <code>false</code>,
     *         otherwise
     */
    public boolean hasValidSignature() {
        // Recompute the signature from the capability's fields and compare it
        // with the one embedded by the issuing MRC.
        return xcap.getServerSignature().equals(calcSignature(xcap.toBuilder()));
    }

    /** @return the replicate-on-close flag stored in the capability. */
    public boolean isReplicateOnClose() {
        return xcap.getReplicateOnClose();
    }

    /** @return the snapshot configuration stored in the capability. */
    public SnapConfig getSnapConfig() {
        return xcap.getSnapConfig();
    }

    /** @return the snapshot timestamp stored in the capability. */
    public long getSnapTimestamp() {
        return xcap.getSnapTimestamp();
    }

    /**
     * Returns a string representation of the capability.
     *
     * @return a string representing the capability (protobuf text format).
     */
    public String toString() {
        return xcap.toString();
    }

    /**
     * Computes the capability signature: an MD5 hex digest over selected XCap
     * fields concatenated with the shared secret.
     *
     * NOTE(review): MD5 is cryptographically weak; changing the algorithm would
     * break signature compatibility with all other XtreemFS components, so any
     * upgrade must be coordinated protocol-wide.
     *
     * NOTE(review): plainText.getBytes() uses the platform default charset;
     * for the field values used here this is likely ASCII-safe, but confirm
     * that file IDs/secrets can never contain non-ASCII characters.
     *
     * @param builder the XCap (builder) whose fields are signed
     * @return the hex-encoded digest, or <code>null</code> if MD5 is unavailable
     */
    protected String calcSignature(XCap.Builder builder) {
        // right now, we use a shared secret between MRC and OSDs
        // as soon as we have a Public Key Infrastructure, signatures
        // will be generated and checked by means of asymmetric encryption
        // techniques
        String plainText = builder.getFileId() + Integer.toString(builder.getAccessMode())
            + Long.toString(builder.getExpireTimeS()) + Long.toString(builder.getTruncateEpoch())
            + Long.toString(builder.getSnapConfig().getNumber()) + Long.toString(builder.getSnapTimestamp())
            + sharedSecret;
        try {
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            md5.update(plainText.getBytes());
            byte[] digest = md5.digest();
            return OutputUtils.byteArrayToHexString(digest);
        } catch (NoSuchAlgorithmException exc) {
            // MD5 is guaranteed by the JRE spec, so this should never happen;
            // log and return null rather than crash the service.
            Logging.logError(Logging.LEVEL_ERROR, this, exc);
            return null;
        }
    }
}

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2010 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.AuthType;
/**
*
* @author bjko
*/
/**
 * Holds constants shared across XtreemFS services.
 */
public class GlobalConstants {

    /** Pre-built RPC authentication object meaning "no authentication". */
    public static final Auth AUTH_NONE =
        Auth.newBuilder().setAuthType(AuthType.AUTH_NONE).build();
}

View File

@@ -0,0 +1,623 @@
/*
* Copyright (c) 2008-2010 by Jan Stender, Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.xtreemfs.common.config.ServiceConfig;
import org.xtreemfs.common.util.NetUtils;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.common.uuids.UUIDResolver;
import org.xtreemfs.dir.DIRClient;
import org.xtreemfs.foundation.LifeCycleThread;
import org.xtreemfs.foundation.TimeSync;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.Schemes;
import org.xtreemfs.foundation.pbrpc.client.PBRPCException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.AuthType;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.POSIXErrno;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.AddressMapping;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.AddressMappingSet;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Configuration;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Service;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceDataMap;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceSet;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceType;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.KeyValuePair;
import sun.misc.Signal;
import sun.misc.SignalHandler;
/**
* A thread that regularly sends a heartbeat signal with fresh service data to the Directory Service.
*/
public class HeartbeatThread extends LifeCycleThread {
/**
* An interface that generates service data to be sent to the Directory Service. Each time a heartbeat
* signal is sent, new service data will be generated by means of invoking <tt>getServiceData()</tt>.
*/
    public interface ServiceDataGenerator {
        // Invoked on every heartbeat to obtain a fresh snapshot of the
        // service's registration data to report to the Directory Service.
        public DIR.ServiceSet getServiceData();
    }
public static final long UPDATE_INTERVAL = 60 * 1000; // 60s
public static final long CONCURRENT_RETRY_INTERVAL = 5 * 1000; // 5s
private final ServiceUUID uuid;
private final ServiceDataGenerator serviceDataGen;
private final DIRClient client;
private volatile boolean quit;
private final ServiceConfig config;
private final boolean advertiseUDPEndpoints;
private final String proto;
private String advertisedHostName;
private final UserCredentials uc;
private static final String STATIC_ATTR_PREFIX = "static.";
public static final String STATUS_ATTR = STATIC_ATTR_PREFIX + "status";
/**
* If set to true, a RegisterService call (which is the call used by this
* thread to regularly report at the DIR) will not update the
* last_updated_s field for the service.
* Used by tools like xtfs_chstatus.
*/
public static final String DO_NOT_SET_LAST_UPDATED = STATIC_ATTR_PREFIX + "do_not_set_last_updated";
/**
* Timestamp when the last heartbeat was sent.
*/
private long lastHeartbeat;
/** Guards pauseNumberOfWaitingThreads and paused. */
private final Object pauseLock;
/** While >0, the thread will stop its periodic operations. */
private int pauseNumberOfWaitingThreads;
/** Set to true if the periodic operation is stopped. */
private boolean paused;
private static Auth authNone;
/** Determines if a renewal should take place in the next run of the main loop. **/
private volatile boolean addressMappingRenewalPending = false;
/** Indicates if a renewal has been triggered. **/
private volatile boolean addressMappingRenewalTriggered = false;
/** Used to sleep until the next heartbeat is scheduled. It can be notified to trigger an instant update **/
private Object updateIntervalMonitor = new Object();
static {
authNone = Auth.newBuilder().setAuthType(AuthType.AUTH_NONE).build();
}
public HeartbeatThread(String name, DIRClient client, ServiceUUID uuid, ServiceDataGenerator serviceDataGen,
ServiceConfig config, boolean advertiseUDPEndpoints) {
super(name);
setPriority(Thread.MAX_PRIORITY);
this.pauseLock = new Object();
this.client = client;
this.uuid = uuid;
this.serviceDataGen = serviceDataGen;
this.config = config;
this.advertiseUDPEndpoints = advertiseUDPEndpoints;
this.uc = UserCredentials.newBuilder().setUsername("hb-thread").addGroups("xtreemfs-services")
.build();
if (!config.isUsingSSL()) {
proto = Schemes.SCHEME_PBRPC;
} else {
if (config.isGRIDSSLmode()) {
proto = Schemes.SCHEME_PBRPCG;
} else {
proto = Schemes.SCHEME_PBRPCS;
}
}
if (config.isUsingMultihoming() && config.isUsingRenewalSignal()) {
enableAddressMappingRenewalSignal();
}
this.lastHeartbeat = TimeSync.getGlobalTime();
}
    /**
     * Shuts the heartbeat thread down: best-effort notifies the DIR that this
     * service is going offline, then signals the main loop to terminate.
     */
    @Override
    public void shutdown() {
        try {
            if (client.clientIsAlive()) {
                // NOTE(review): the trailing "1" is passed as-is to the DIR
                // call; presumably a sentinel/version value -- confirm against
                // the xtreemfs_service_offline API.
                client.xtreemfs_service_offline(null, authNone, uc, uuid.toString(), 1);
            }
        } catch (Exception ex) {
            // Going offline is best-effort; never block shutdown on DIR errors.
            Logging.logMessage(Logging.LEVEL_WARN, this, "could not set service offline at DIR");
            Logging.logError(Logging.LEVEL_WARN, this, ex);
        }
        // Terminate the periodic loop and wake the thread from any wait().
        this.quit = true;
        this.interrupt();
    }
public void initialize() throws IOException {
// initially, ...
try {
// ... for each UUID, ...
for (;;) {
// catch any ConcurrentModificationException and retry
try {
registerServices(-1);
break;
} catch (PBRPCException ex) {
if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_EAGAIN) {
if (Logging.isInfo())
Logging.logMessage(Logging.LEVEL_INFO, Category.misc, this,
"concurrent service registration; will try again after %d milliseconds",
CONCURRENT_RETRY_INTERVAL);
} else
throw ex;
}
}
// ... register the address mapping for the service
registerAddressMappings();
} catch (InterruptedException ex) {
} catch (Exception ex) {
Logging.logMessage(Logging.LEVEL_ERROR, this,
"an error occurred while initially contacting the Directory Service: " + ex);
throw new IOException("cannot initialize service at XtreemFS DIR: " + ex, ex);
}
try {
this.setServiceConfiguration();
} catch (Exception e) {
Logging.logMessage(Logging.LEVEL_ERROR, this,
"An error occurred while submitting the service configuration to the DIR service:");
Logging.logError(Logging.LEVEL_ERROR, this, e);
}
}
@Override
public void run() {
    // Heartbeat main loop: periodically re-registers the services at the DIR,
    // honours pause requests from pauseOperation()/resumeOperation(), and
    // performs address-mapping renewals when triggered.
    try {
        notifyStarted();
        // periodically, ...
        while (!quit) {
            // Block while external callers hold the thread paused (their count
            // is tracked in pauseNumberOfWaitingThreads).
            synchronized (pauseLock) {
                while (pauseNumberOfWaitingThreads > 0) {
                    pauseLock.wait();
                }
                paused = false;
            }
            try {
                // update data on DIR; do not retry, as this is done periodically anyway
                registerServices(1);
            } catch (PBRPCException ex) {
                if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_EAGAIN) {
                    if (Logging.isInfo())
                        Logging.logMessage(Logging.LEVEL_INFO, Category.misc, this,
                                "concurrent service registration; will try again after %d milliseconds",
                                UPDATE_INTERVAL);
                } else {
                    // Fix: braces added so the stack trace is only logged for real
                    // errors. Previously logError() ran unconditionally, dumping an
                    // ERROR-level stack trace even for the expected EAGAIN case
                    // that was just logged at INFO level.
                    Logging.logMessage(Logging.LEVEL_ERROR, this,
                            "An error occurred during the periodic registration at the DIR:");
                    Logging.logError(Logging.LEVEL_ERROR, this, ex);
                }
            } catch (IOException ex) {
                Logging.logMessage(Logging.LEVEL_ERROR, this, "periodic registration at DIR failed: %s",
                        ex.toString());
                if (Logging.isDebug())
                    Logging.logError(Logging.LEVEL_DEBUG, this, ex);
            }
            if (addressMappingRenewalPending) {
                try {
                    // Reset the flag indicating a renewal has been triggered.
                    addressMappingRenewalTriggered = false;
                    // Try to renew the address mappings.
                    registerAddressMappings();
                    // If the renewal has been successful, the renewal flag will be reset.
                    // If an error occurred, the renewal will be retried on the next regular heartbeat.
                    addressMappingRenewalPending = false;
                    // Renew the networks list available to the UUIDResolver.
                    UUIDResolver.renewNetworks();
                } catch (IOException ex) {
                    Logging.logMessage(Logging.LEVEL_ERROR, this,
                            "requested renewal of address mappings failed: %s", ex.toString());
                }
            }
            if (quit) {
                break;
            }
            // Acknowledge any pending pause request before going to sleep.
            synchronized (pauseLock) {
                paused = true;
                pauseLock.notifyAll();
            }
            // If no renewal request has been triggered during the loop, this HeartbeatThread can wait for
            // the next regular UPDATE_INTERVAL.
            if (!addressMappingRenewalTriggered) {
                synchronized (updateIntervalMonitor) {
                    updateIntervalMonitor.wait(UPDATE_INTERVAL);
                }
            }
        }
        notifyStopped();
    } catch (InterruptedException e) {
        // interrupt() is the shutdown mechanism (see shutdown()).
        notifyStopped();
    } catch (Throwable ex) {
        notifyCrashed(ex);
    }
}
/**
 * Registers (or re-registers) every service provided by this component at
 * the DIR. Static attributes of an existing record are preserved, and for
 * volume services the list of MRC replicas ("mrc", "mrc2", ...) is merged
 * instead of overwritten.
 *
 * @param numRetries
 *            retry count passed to the DIR client calls; -1 selects the
 *            client's default retry behavior.
 * @throws IOException
 *             if a DIR call fails.
 * @throws PBRPCException
 *             e.g. with POSIX_ERROR_EAGAIN on concurrent registration.
 * @throws InterruptedException
 *             if the thread is interrupted while waiting for a response.
 */
private void registerServices(int numRetries) throws IOException, PBRPCException, InterruptedException {
    for (Service reg : serviceDataGen.getServiceData().getServicesList()) {
        // retrieve old DIR entry
        ServiceSet oldSet = numRetries == -1 ? client.xtreemfs_service_get_by_uuid(null, authNone, uc,
            reg.getUuid()) : client.xtreemfs_service_get_by_uuid(null, authNone, uc, reg.getUuid(),
            numRetries);
        long currentVersion = 0;
        Service oldService = oldSet.getServicesCount() == 0 ? null : oldSet.getServices(0);
        // Fix: typed map instead of the former raw 'new HashMap()'.
        Map<String, String> staticAttrs = new HashMap<String, String>();
        if (oldService != null) {
            currentVersion = oldService.getVersion();
            final ServiceDataMap data = oldService.getData();
            // Static attributes must survive re-registration.
            for (KeyValuePair pair : data.getDataList()) {
                if (pair.getKey().startsWith(STATIC_ATTR_PREFIX))
                    staticAttrs.put(pair.getKey(), pair.getValue());
            }
        }
        // Default the status attribute to AVAIL if it was never set.
        if (!staticAttrs.containsKey(STATUS_ATTR))
            staticAttrs.put(STATUS_ATTR,
                    Integer.toString(DIR.ServiceStatus.SERVICE_STATUS_AVAIL.getNumber()));
        Service.Builder builder = reg.toBuilder();
        builder.setVersion(currentVersion);
        final ServiceDataMap.Builder data = ServiceDataMap.newBuilder();
        for (Entry<String, String> sAttr : staticAttrs.entrySet()) {
            data.addData(KeyValuePair.newBuilder().setKey(sAttr.getKey()).setValue(sAttr.getValue())
                    .build());
        }
        // If the service to register is a volume, and a volume with the
        // same ID but a different MRC has been registered already, it
        // may be necessary to register the volume's MRC as a replica.
        // In this case, all keys starting with 'mrc' have to be treated
        // separately.
        if (reg.getType() == ServiceType.SERVICE_TYPE_VOLUME && oldService != null
                && oldService.getUuid().equals(reg.getUuid())) {
            // retrieve the MRC UUID attached to the volume to be
            // registered
            String mrcUUID = null;
            for (KeyValuePair kv : reg.getData().getDataList())
                if (kv.getKey().equals("mrc")) {
                    mrcUUID = kv.getValue();
                    break;
                }
            assert (mrcUUID != null);
            // check if the UUID is already contained in the volume's
            // list of MRCs and determine the next vacant key
            int maxMRCNo = 1;
            boolean contained = false;
            for (KeyValuePair kv : oldService.getData().getDataList()) {
                if (kv.getKey().startsWith("mrc")) {
                    data.addData(kv);
                    if (kv.getValue().equals(mrcUUID))
                        contained = true;
                    if (!kv.getKey().equals("mrc")) {
                        // keys look like "mrc<N>"; track the highest N
                        int no = Integer.parseInt(kv.getKey().substring(3));
                        if (no > maxMRCNo)
                            maxMRCNo = no;
                    }
                }
            }
            // if the UUID is not contained, add it
            if (!contained)
                data.addData(KeyValuePair.newBuilder().setKey("mrc" + (maxMRCNo + 1)).setValue(mrcUUID));
            // add all other key-value pairs
            for (KeyValuePair kv : reg.getData().getDataList())
                if (!kv.getKey().startsWith("mrc"))
                    data.addData(kv);
        }
        // in any other case, all data can be updated
        else
            data.addAllData(reg.getData().getDataList());
        builder.setData(data);
        if (numRetries == -1)
            client.xtreemfs_service_register(null, authNone, uc, builder.build());
        else
            client.xtreemfs_service_register(null, authNone, uc, builder.build(), numRetries);
        if (Logging.isDebug()) {
            Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                    "%s successfully updated at Directory Service", uuid);
        }
        // update lastHeartbeat value
        this.lastHeartbeat = TimeSync.getGlobalTime();
    }
}
/**
 * Uploads this service's local configuration to the DIR, continuing the
 * version counter of the configuration record currently stored there.
 */
private void setServiceConfiguration() throws IOException, PBRPCException, InterruptedException {
    // Fetch the stored record so the new one carries the same version.
    Configuration stored = client.xtreemfs_configuration_get(null, authNone, uc, uuid.toString());
    long version = stored.getVersion();

    // Build a fresh configuration record from the local settings.
    Configuration.Builder builder = Configuration.newBuilder();
    builder.setUuid(uuid.toString()).setVersion(version);
    for (Map.Entry<String, String> entry : config.toHashMap().entrySet()) {
        builder.addParameter(KeyValuePair.newBuilder().setKey(entry.getKey())
                .setValue(entry.getValue()).build());
    }

    client.xtreemfs_configuration_set(null, authNone, uc, builder.build());
    if (Logging.isDebug()) {
        Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                "%s successfully send configuration to Directory Service", uuid);
    }
}
/**
 * Determines the endpoint to advertise for this service and registers the
 * resulting address mappings at the DIR.
 *
 * Selection priority for the advertised endpoint:
 * 1. configured hostname / listen.address,
 * 2. the local hostname if it matches a reachable endpoint,
 * 3. the first reachable endpoint,
 * 4. 127.0.0.1 as a last resort (local testing).
 */
private void registerAddressMappings() throws InterruptedException, IOException {
    List<AddressMapping.Builder> reachableEndpoints = NetUtils.getReachableEndpoints(config.getPort(), proto);
    AddressMapping.Builder advertisedEndpoint = null;
    // Use the configured hostname or listen.address if they are set for the advertised endpoint.
    if (!config.getHostName().isEmpty() || config.getAddress() != null) {
        // remove the leading '/' if necessary
        String host = config.getHostName().isEmpty() ? config.getAddress().getHostName() : config.getHostName();
        if (host.startsWith("/")) {
            host = host.substring(1);
        }
        try {
            // Probe whether the hostname resolves locally; only the exception
            // matters, so the result is intentionally discarded (the former
            // unused local variable has been removed).
            InetAddress.getByName(host);
        } catch (Exception ex) {
            Logging.logMessage(Logging.LEVEL_WARN, this, "WARNING! Could not resolve my "
                    + "hostname (%s) locally! Please make sure that the hostname is set correctly "
                    + "(either on your system or in the service config file). This will lead to "
                    + "problems if clients and other OSDs cannot resolve this service's address!\n", host);
        }
        advertisedEndpoint = AddressMapping.newBuilder().setUuid(uuid.toString()).setVersion(0).setProtocol(proto)
                .setAddress(host).setPort(config.getPort()).setTtlS(3600)
                .setUri(proto + "://" + host + ":" + config.getPort());
    }
    // Try to resolve the localHostName and find it in the endpoints to use it as the advertised endpoint if possible.
    if (advertisedEndpoint == null) {
        try {
            InetAddress host = InetAddress.getLocalHost();
            String hostAddress = NetUtils.getHostAddress(host);
            // Look for a reachable endpoint that matches the local address.
            for (AddressMapping.Builder mapping : reachableEndpoints) {
                if (mapping.getAddress().equals(hostAddress)) {
                    advertisedEndpoint = mapping;
                    break;
                }
            }
        } catch (UnknownHostException e) {
            Logging.logMessage(Logging.LEVEL_WARN, Category.net, this, "Could not resolve the local hostname.");
        }
    }
    // Use the first mapping from the reachable endpoints. This will be a global address if one exists.
    if (advertisedEndpoint == null && reachableEndpoints.size() > 0) {
        advertisedEndpoint = reachableEndpoints.get(0);
    }
    // in case no IP address could be found at all, use 127.0.0.1 for local testing.
    if (advertisedEndpoint == null) {
        Logging.logMessage(Logging.LEVEL_WARN, Category.net, this,
                "Could not find a valid IP address, will use 127.0.0.1 instead.");
        advertisedEndpoint = AddressMapping.newBuilder().setAddress("127.0.0.1").setPort(config.getPort())
                .setProtocol(proto).setTtlS(3600)
                .setUri(NetUtils.getURI(proto, InetAddress.getByName("127.0.0.1"), config.getPort()));
    }
    // Fetch the latest address mapping version from the Directory Service.
    long version = 0;
    AddressMappingSet ams = client.xtreemfs_address_mappings_get(null, authNone, uc, uuid.toString());
    // Retrieve the version number from the address mapping.
    if (ams.getMappingsCount() > 0) {
        version = ams.getMappings(0).getVersion();
    }
    // Set the advertised endpoints version, matching network and uuid.
    advertisedEndpoint.setVersion(version).setMatchNetwork("*").setUuid(uuid.toString());
    advertisedHostName = advertisedEndpoint.getAddress();
    List<AddressMapping.Builder> endpoints = new ArrayList<AddressMapping.Builder>();
    endpoints.add(advertisedEndpoint);
    if (advertiseUDPEndpoints) {
        endpoints.add(NetUtils.cloneMappingForProtocol(advertisedEndpoint, Schemes.SCHEME_PBRPCU));
    }
    if (config.isUsingMultihoming()) {
        for (AddressMapping.Builder mapping : reachableEndpoints) {
            // Add all the remaining endpoints not advertised yet.
            if (!advertisedEndpoint.getAddress().equals(mapping.getAddress())) {
                mapping.setUuid(uuid.toString());
                endpoints.add(mapping);
                if (advertiseUDPEndpoints) {
                    endpoints.add(NetUtils.cloneMappingForProtocol(mapping, Schemes.SCHEME_PBRPCU));
                }
            }
        }
    }
    AddressMappingSet.Builder amsb = AddressMappingSet.newBuilder();
    for (AddressMapping.Builder mapping : endpoints) {
        amsb.addMappings(mapping);
    }
    if (Logging.isInfo()) {
        Logging.logMessage(Logging.LEVEL_INFO, Category.net, this,
                "Registering the following address mappings for the service:");
        for (AddressMapping mapping : amsb.getMappingsList()) {
            Logging.logMessage(Logging.LEVEL_INFO, Category.net, this, "%s --> %s (%s)", mapping.getUuid(),
                    mapping.getUri(), mapping.getMatchNetwork());
        }
    }
    // Register or update the current address mapping.
    client.xtreemfs_address_mappings_set(null, authNone, uc, amsb.build());
}
/**
 * Returns the time at which the last successful heartbeat (service
 * registration) was sent, as set from {@code TimeSync.getGlobalTime()}.
 *
 * @return the timestamp in milliseconds (System.currentTimeMillis()-style).
 */
public long getLastHeartbeat() {
    return lastHeartbeat;
}
/**
 * Returns the address of the endpoint currently advertised at the DIR
 * (set by {@code registerAddressMappings()}).
 *
 * @return the advertisedHostName
 */
public String getAdvertisedHostName() {
    return advertisedHostName;
}
/**
 * Instructs the HeartbeatThread to pause its current operations. Blocks until it has done so.
 *
 * @remark Do not forget to call {@link #resumeOperation()} afterward or the thread won't be unpaused.
 *
 * @throws InterruptedException
 *             if the calling thread is interrupted while waiting; the pause
 *             request is rolled back before the exception is rethrown.
 */
public void pauseOperation() throws InterruptedException {
    synchronized (pauseLock) {
        // Announce this waiter first; run() parks between heartbeats while
        // this counter is > 0.
        pauseNumberOfWaitingThreads++;
        // Wait until run() acknowledges the pause by setting 'paused'.
        while (!paused) {
            try {
                pauseLock.wait();
            } catch (InterruptedException e) {
                // In case of a shutdown, abort.
                // Roll back the registration so run() is not blocked forever,
                // and wake any other waiters before propagating.
                pauseNumberOfWaitingThreads--;
                pauseLock.notifyAll();
                throw e;
            }
        }
    }
}
/**
 * Tells the HeartbeatThread to resume operation after a prior
 * {@link #pauseOperation()}. Decrements the waiter count and wakes the
 * heartbeat loop so it can continue once no pause requests remain.
 */
public void resumeOperation() {
    synchronized (pauseLock) {
        pauseNumberOfWaitingThreads--;
        pauseLock.notifyAll();
    }
}
/**
 * Requests an immediate renewal of the address mappings and wakes the
 * heartbeat loop if it is currently sleeping between updates.
 */
public void triggerAddressMappingRenewal() {
    // Order matters: both flags must be set before the loop is woken.
    // 'pending' makes run() perform the renewal; 'triggered' makes it skip
    // the subsequent UPDATE_INTERVAL wait.
    addressMappingRenewalPending = true;
    addressMappingRenewalTriggered = true;
    synchronized (updateIntervalMonitor) {
        updateIntervalMonitor.notifyAll();
    }
}
/**
 * Enable a signal handler for USR2 which will trigger the address mapping renewal.
 *
 * Since it is possible that certain VMs are using the USR2 signal internally, the server should be started with the
 * -XX:+UseAltSigs flag when signal usage is desired.
 *
 * @throws RuntimeException
 *             if the USR2 handler cannot be registered (e.g. the signal is
 *             reserved by the VM).
 */
private void enableAddressMappingRenewalSignal() {
    // Capture 'this' for use inside the anonymous handler.
    final HeartbeatThread hbt = this;
    // TODO(jdillmann): Test on different VMs and operating systems.
    try {
        Signal.handle(new Signal("USR2"), new SignalHandler() {
            @Override
            public void handle(Signal signal) {
                // If the HeartbeatThread is still alive, renew the addresses and send them to the DIR.
                // NOTE(review): 'hbt' is a captured 'this' and can never be null,
                // so this check does not actually test liveness — confirm intent.
                if (hbt != null) {
                    hbt.triggerAddressMappingRenewal();
                }
            }
        });
    } catch (IllegalArgumentException e) {
        Logging.logMessage(Logging.LEVEL_CRIT, this, "Could not register SignalHandler for USR2.");
        Logging.logError(Logging.LEVEL_CRIT, null, e);
        throw new RuntimeException("Could not register SignalHandler for USR2.", e);
    }
}
}

View File

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2010 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.KeyValuePair;
/**
*
* @author bjko
*/
public class KeyValuePairs {

    /**
     * Returns the value stored under {@code key}, or {@code null} if the list
     * contains no pair with that key.
     */
    public static String getValue(List<KeyValuePair> list, String key) {
        for (KeyValuePair pair : list) {
            if (pair.getKey().equals(key))
                return pair.getValue();
        }
        return null;
    }

    /**
     * Sets {@code key} to {@code value}: any existing pairs with the same key
     * are removed first (KeyValuePair messages are immutable, so the entry is
     * replaced rather than modified in place).
     */
    public static void putValue(List<KeyValuePair> list, String key, String value) {
        Iterator<KeyValuePair> iter = list.iterator();
        while (iter.hasNext()) {
            KeyValuePair pair = iter.next();
            if (pair.getKey().equals(key))
                iter.remove();
        }
        list.add(KeyValuePair.newBuilder().setKey(key).setValue(value).build());
    }

    /**
     * Converts a string map into a list of key-value pairs. The input map is
     * left unmodified.
     */
    public static List<KeyValuePair> fromMap(Map<String, String> map) {
        // Typed instead of the former raw 'new ArrayList(...)'.
        List<KeyValuePair> list = new ArrayList<KeyValuePair>(map.size());
        for (Entry<String, String> e : map.entrySet()) {
            // Fix: the previous code called e.setValue(null), which returned the
            // old value but also nulled out every entry of the caller's map.
            list.add(KeyValuePair.newBuilder().setKey(e.getKey()).setValue(e.getValue()).build());
        }
        return list;
    }

    /**
     * Converts a list of key-value pairs into a map. On duplicate keys, the
     * last pair in the list wins.
     */
    public static Map<String, String> toMap(List<KeyValuePair> list) {
        Map<String, String> map = new HashMap<String, String>();
        for (KeyValuePair kv : list)
            map.put(kv.getKey(), kv.getValue());
        return map;
    }
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2010 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
/**
*
* @author bjko
*/
public class ReplicaUpdatePolicies {

    /** No replication. */
    public static final String REPL_UPDATE_PC_NONE   = "";
    /** Read-only replication. */
    public static final String REPL_UPDATE_PC_RONLY  = "ronly";
    /** Write all replicas, read from one. */
    public static final String REPL_UPDATE_PC_WARONE = "WaR1";
    /** Write all, read any. @deprecated as of XtreemFS 1.3.1 and no longer allowed to set. Use WaR1 instead. */
    public static final String REPL_UPDATE_PC_WARA   = "WaRa";
    /** Write quorum, read quorum. */
    public static final String REPL_UPDATE_PC_WQRQ   = "WqRq";

    /**
     * Returns true if the replicaUpdatePolicy is read-write replicated.
     */
    public static boolean isRwReplicated(String replicaUpdatePolicy) {
        boolean wara = replicaUpdatePolicy.equals(REPL_UPDATE_PC_WARA);
        boolean war1 = replicaUpdatePolicy.equals(REPL_UPDATE_PC_WARONE);
        boolean wqrq = replicaUpdatePolicy.equals(REPL_UPDATE_PC_WQRQ);
        return wara || war1 || wqrq;
    }
}

View File

@@ -0,0 +1,174 @@
/*
* Copyright (c) 2009 by Christian Lorenz,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
/**
 * It manages the availability of all services. If a service could not be accessed, it is marked as not available
* for a period.<br>
* This class is thread-safe.<br>
* 06.04.2009
*/
public class ServiceAvailability {

    /** Initial back-off before a failed service is retried (1 minute). */
    public final static int DEFAULT_INITIAL_TIMEOUT  = 1000 * 60;           // 1 minute
    /** Interval between cleanup runs of the remover thread (60 minutes). */
    public final static int DEFAULT_CLEANUP_INTERVAL = 1000 * 60 * 60;      // 60 minutes
    /** Entries not accessed for this long are evicted (1 day). */
    public final static int DEFAULT_MAX_LAST_ACCESS  = 1000 * 60 * 60 * 24; // 1 day

    /**
     * This thread removes services from the list which were not accessed for a
     * long time.
     * 06.04.2009
     */
    // Made static: the class uses no state of the enclosing instance, so it
    // should not hold a hidden reference to it.
    private static class ServiceRemover extends Thread {
        final int cleanupInterval;
        final private int maxLastAccessTime;
        private final ConcurrentHashMap<ServiceUUID, ServiceInfo> serviceAvailability;
        boolean quit;

        public ServiceRemover(ConcurrentHashMap<ServiceUUID, ServiceInfo> serviceAvailability, int maxLastAccessTime,
                int cleanupInterval) {
            super("ServiceAvailability Service-Remover");
            this.serviceAvailability = serviceAvailability;
            this.cleanupInterval = cleanupInterval;
            this.maxLastAccessTime = maxLastAccessTime;
            this.quit = false;
        }

        /**
         * Shuts the thread down.
         */
        public void quitThread() {
            this.quit = true;
            this.interrupt();
        }

        /**
         * Main loop: periodically evicts entries that have not been accessed
         * for longer than maxLastAccessTime.
         */
        @Override
        public void run() {
            while (!quit) {
                Iterator<ServiceInfo> serviceIt = serviceAvailability.values().iterator();
                while (serviceIt.hasNext()) {
                    ServiceInfo service = serviceIt.next();
                    // service was not accessed for a long time => evict
                    if (System.currentTimeMillis() - service.lastAccessTime > maxLastAccessTime)
                        serviceIt.remove();
                }
                try {
                    Thread.sleep(cleanupInterval);
                } catch (InterruptedException ex) {
                    // interrupt() comes from quitThread(); the loop condition decides.
                }
            }
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.lifecycle, this, "shutdown complete");
        }
    }

    /**
     * Encapsulates the per-service availability info: last access/failure
     * timestamps and the current back-off timeout.
     * 09.04.2009
     */
    private static class ServiceInfo {
        private long lastAccessTime       = 0; // milliseconds
        private long lastFailedAccessTime = 0; // milliseconds
        private int  currentTimeout;           // milliseconds

        public ServiceInfo(int timeout) {
            currentTimeout = timeout;
        }

        /**
         * Returns true if the back-off period since the last failure has
         * elapsed. Also touches lastAccessTime (used for eviction).
         */
        public boolean isAvailable() {
            lastAccessTime = System.currentTimeMillis();
            return lastFailedAccessTime + currentTimeout <= System.currentTimeMillis();
        }

        /** Records a failed access and doubles the back-off. */
        public void lastAccessFailed() {
            lastFailedAccessTime = System.currentTimeMillis();
            // Fix: guard against integer overflow after many consecutive
            // failures (a negative timeout would make the service always
            // appear available again).
            if (currentTimeout <= Integer.MAX_VALUE / 2)
                currentTimeout = currentTimeout * 2;
        }
    }

    /**
     * makes sure, that obsolete entries in the map will be removed from time to time
     */
    private final ServiceRemover removerThread;

    /**
     * saves the service-timeouts
     * key: OSD-UUID
     */
    private final ConcurrentHashMap<ServiceUUID, ServiceInfo> serviceAvailability;

    private final int initialTimeout;

    /**
     * uses default time intervals
     */
    public ServiceAvailability() {
        this.serviceAvailability = new ConcurrentHashMap<ServiceUUID, ServiceInfo>();
        initialTimeout = DEFAULT_INITIAL_TIMEOUT;
        this.removerThread = new ServiceRemover(this.serviceAvailability, DEFAULT_MAX_LAST_ACCESS, DEFAULT_CLEANUP_INTERVAL);
        this.removerThread.start();
    }

    /*
     * useful for tests
     */
    /**
     * all params in milliseconds
     */
    public ServiceAvailability(int initialTimeout, int maxLastAccessTime, int cleanupInterval) {
        this.serviceAvailability = new ConcurrentHashMap<ServiceUUID, ServiceInfo>();
        this.initialTimeout = initialTimeout;
        this.removerThread = new ServiceRemover(this.serviceAvailability, maxLastAccessTime, cleanupInterval);
        this.removerThread.start();
    }

    /**
     * shutdown of the internal thread
     */
    public void shutdown() {
        this.removerThread.quitThread();
    }

    /**
     * Checks if the service should be available for access. A service seen for
     * the first time is always considered available.
     * @param service
     * @return true if the service may be contacted
     */
    public boolean isServiceAvailable(ServiceUUID service) {
        ServiceInfo info = serviceAvailability.get(service);
        if (info == null) {
            // Fix: atomic insert instead of the former containsKey/put sequence,
            // which could overwrite a concurrently created entry (and thereby
            // reset its back-off state).
            info = serviceAvailability.putIfAbsent(service, new ServiceInfo(initialTimeout));
            if (info == null)
                return true;
        }
        return info.isAvailable();
    }

    /**
     * If a service could not be accessed, you must run this method. So the system can know who is available and
     * can manage the timeouts.
     * @param service
     */
    public void setServiceWasNotAvailable(ServiceUUID service) {
        // Atomic create-if-absent; use whichever entry ends up in the map.
        ServiceInfo fresh = new ServiceInfo(initialTimeout);
        ServiceInfo existing = serviceAvailability.putIfAbsent(service, fresh);
        ServiceInfo info = (existing != null) ? existing : fresh;
        info.lastAccessFailed();
    }
}

View File

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
/**
 * Thrown by an authentication provider when authentication is not
* possible for any reason.
* @author bjko
*/
public class AuthenticationException extends Exception {

    // Exception is Serializable; declare the id explicitly so serialization
    // compatibility does not depend on compiler-generated defaults.
    private static final long serialVersionUID = 1L;

    /** creates a new exception.
     *
     * @param msg an error message that should be meaningful to users!
     */
    public AuthenticationException(String msg) {
        super(msg);
    }

    /**
     * Creates a new exception that preserves the underlying cause.
     *
     * @param msg an error message that should be meaningful to users!
     * @param cause the original error that prevented authentication
     */
    public AuthenticationException(String msg, Throwable cause) {
        super(msg, cause);
    }
}

View File

@@ -0,0 +1,45 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
import org.xtreemfs.foundation.pbrpc.channels.ChannelIO;
/**
* Authentication Providers extract the credentials (UID/GIDs/SuperUser) from
* the authentication header and the certificates.
*
* @author bjko
*/
public interface AuthenticationProvider {

    /**
     * Initializes the provider; called once before any credentials are
     * requested.
     *
     * @param useSSL
     *            true, if SSL is enabled. Certificate-based providers should
     *            reject useSSL == false here.
     * @throws java.lang.RuntimeException
     *             if the provider cannot be initialized.
     */
    void initialize(boolean useSSL) throws RuntimeException;

    /**
     * Get the effective credentials for an operation.
     *
     * @param ctx
     *            user credentials sent by the client
     * @param channel
     *            the channel used, can be used to store attachments (e.g. for
     *            caching parsed credentials) and to get certificates
     * @return the effective user credentials
     * @throws org.xtreemfs.common.auth.AuthenticationException
     *             if authentication is not possible
     */
    UserCredentials getEffectiveCredentials(org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials ctx, ChannelIO channel) throws AuthenticationException;
}

View File

@@ -0,0 +1,200 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.channels.ChannelIO;
/**
* authentication provider for the Contrail project.
*
* @author PS
*/
public class FederationIdX509AuthProvider implements AuthenticationProvider {
private final static String USER_ID = "CN";
private final static String GROUP_ID = "O";
// String privilegedCertificatePathname = "privileged.txt";
// private HashSet<String> privilegedCertificates;
@Override
public UserCredentials getEffectiveCredentials(
org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials ctx,
ChannelIO channel) throws AuthenticationException {
// use cached info, if present!
if (channel.getAttachment() != null) {
if (Logging.isDebug()) {
Logging.logMessage(
Logging.LEVEL_DEBUG, Category.auth, this, "using attachment...");
}
final UserCredentials creds = (UserCredentials)channel.getAttachment();
if (Logging.isDebug()) {
Logging.logMessage(
Logging.LEVEL_DEBUG, Category.auth, this, "using cached creds: " + creds);
}
return creds;
}
// parse cert if no cached info is present
try {
final Certificate[] certs = channel.getCerts();
if (certs.length > 0) {
final X509Certificate cert = ((X509Certificate) certs[0]);
String fullDN = cert.getSubjectX500Principal().getName();
final List<String> globalUIDs = getNamedElements(
cert.getSubjectX500Principal().getName(), USER_ID);
// only use the UUID of the certificate
String globalUID = null;
if (!globalUIDs.isEmpty()) {
globalUID = globalUIDs.iterator().next();
}
else {
globalUID = fullDN;
}
final List<String> globalGIDs = getNamedElements(
cert.getSubjectX500Principal().getName(), GROUP_ID);
if (globalGIDs.isEmpty()) {
globalGIDs.add(fullDN);
}
if (Logging.isDebug()) {
Logging.logMessage(
Logging.LEVEL_DEBUG, Category.auth, this,
"X.509-User cert present: %s, %s", globalUID, globalGIDs);
}
// the super user is required for the GAFS manager to
// act in behalf of a user to create/delete volumes and
// add policies to volumes
boolean isSuperUser = false;
// for (String privilegedCert : this.privilegedCertificates) {
// if (fullDN.contains(privilegedCert)) {
// isSuperUser = true;
// break;
// }
// }
final UserCredentials creds = new UserCredentials(globalUID, globalGIDs, isSuperUser);
channel.setAttachment(creds);
return creds;
}
else {
throw new AuthenticationException("no X.509-certificates present");
}
} catch (Exception ex) {
Logging.logUserError(Logging.LEVEL_ERROR, Category.auth, this, ex);
throw new AuthenticationException("invalid credentials " + ex);
}
}
private List<String> getNamedElements(String principal, String element) {
String[] elems = principal.split(",");
List<String> elements = new ArrayList<String>();
for (String elem : elems) {
String[] kv = elem.split("=");
if (kv.length == 2
&& kv[0].equals(element)) {
elements.add(kv[1]);
}
}
return elements;
}
public void initialize(boolean useSSL) throws RuntimeException {
if (!useSSL) {
throw new RuntimeException(this.getClass().getName() + " can only be used if SSL is enabled!");
}
// InputStream privilegedCertificatesStream
// = getClass().getClassLoader().getResourceAsStream(this.privilegedCertificatePathname);
// service certs
// this.privilegedCertificates = readHosts(privilegedCertificatesStream);
}
public static HashSet<String> readHosts(InputStream serviceCertificatesStream) {
HashSet<String> serviceCertificates = new HashSet<String>();
if (serviceCertificatesStream == null) {
Logging.logMessage(
Logging.LEVEL_WARN, Category.auth, FederationIdX509AuthProvider.class,
"The list of privileged-certificates does not exist.");
return serviceCertificates;
// throw new RuntimeException("The list of privileged-certificates does not exist");
}
InputStreamReader in = null;
BufferedReader reader = null;
try {
in = new InputStreamReader(serviceCertificatesStream);
reader = new BufferedReader(in);
String line = null;
while ((line = reader.readLine()) != null) {
line.trim();
if (line == null || line.equals("")) {
continue;
}
else {
serviceCertificates.add(line);
Logging.logMessage(Logging.LEVEL_INFO, Category.auth, FederationIdX509AuthProvider.class,
"Adding service-certificate: " + line);
}
}
} catch (FileNotFoundException e) {
Logging.logMessage(
Logging.LEVEL_WARN, Category.auth, FederationIdX509AuthProvider.class,
"The list of privileged-certificates does not exist.");
// throw new RuntimeException(
// "The list of privileged-certificates does not exist.");
} catch (IOException e) {
Logging.logMessage(
Logging.LEVEL_WARN, Category.auth, FederationIdX509AuthProvider.class,
"Could not parse the list of privileged-certificates.");
// throw new RuntimeException(
// "Could not parse the list of privileged-certificates.");
} finally {
try {
if (reader != null) {
reader.close();
}
} catch (IOException e) {
}
}
return serviceCertificates;
}
}

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
import org.xtreemfs.foundation.pbrpc.channels.ChannelIO;
/**
* A simple provider that parses the JSON string sent in the authentication
* header as described in the protocol spec.
*
* @author bjko
*/
public class NullAuthProvider implements AuthenticationProvider {
public NullAuthProvider() {
}
public UserCredentials getEffectiveCredentials(org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials ctx, ChannelIO channel)
throws AuthenticationException {
return new UserCredentials(ctx.getUsername(), ctx.getGroupsList(), ctx.getUsername().equals("root"));
}
public void initialize(boolean useSSL) throws RuntimeException {
}
}

View File

@@ -0,0 +1,111 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.channels.ChannelIO;
/**
* authentication provider for XOS certificates.
*
* @author bjko
*/
public class SimpleX509AuthProvider implements AuthenticationProvider {

    /** Delegate used to parse the credentials header for host/service certificates. */
    private NullAuthProvider nullAuth;

    @Override
    public UserCredentials getEffectiveCredentials(org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials ctx,
        ChannelIO channel) throws AuthenticationException {
        // use cached info!
        assert (nullAuth != null);
        if (channel.getAttachment() != null) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.auth, this, "using attachment...");
            // cache layout: { Boolean isServiceCert [, UserCredentials creds] }
            final Object[] cache = (Object[]) channel.getAttachment();
            final Boolean serviceCert = (Boolean) cache[0];
            if (serviceCert) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, Category.auth, this, "service cert...");
                return nullAuth.getEffectiveCredentials(ctx, channel);
            } else {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, Category.auth, this, "using cached creds: "
                        + cache[1]);
                return (UserCredentials) cache[1];
            }
        }
        // parse cert if no cached info is present
        try {
            final Certificate[] certs = channel.getCerts();
            if (certs.length > 0) {
                final X509Certificate cert = ((X509Certificate) certs[0]);
                String fullDN = cert.getSubjectX500Principal().getName();
                // NOTE(review): getNameElement() returns null if the DN has no CN;
                // the resulting NPE is caught below and surfaces as an
                // AuthenticationException — confirm this is the intended contract.
                String commonName = getNameElement(cert.getSubjectX500Principal().getName(), "CN");
                if (commonName.startsWith("host/") || commonName.startsWith("xtreemfs-service/")) {
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, Category.auth, this,
                            "X.509-host cert present");
                    // Fix: use the boxed constant instead of the deprecated
                    // 'new Boolean(true)' allocation.
                    channel.setAttachment(new Object[] { Boolean.TRUE });
                    // use NullAuth in this case to parse JSON header
                    return nullAuth.getEffectiveCredentials(ctx, null);
                } else {
                    final String globalUID = fullDN;
                    final String globalGID = getNameElement(cert.getSubjectX500Principal().getName(), "OU");
                    List<String> gids = new ArrayList<String>(1);
                    gids.add(globalGID);
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, Category.auth, this,
                            "X.509-User cert present: %s, %s", globalUID, globalGID);
                    boolean isSuperUser = globalGID.contains("xtreemfs-admin");
                    final UserCredentials creds = new UserCredentials(globalUID, gids, isSuperUser);
                    channel.setAttachment(new Object[] { Boolean.FALSE, creds });
                    return creds;
                }
            } else {
                throw new AuthenticationException("no X.509-certificates present");
            }
        } catch (Exception ex) {
            Logging.logUserError(Logging.LEVEL_ERROR, Category.auth, this, ex);
            throw new AuthenticationException("invalid credentials " + ex);
        }
    }

    /**
     * Returns the value of the first RDN with the given key from a
     * comma-separated DN string, or null if no such RDN exists.
     */
    private String getNameElement(String principal, String element) {
        String[] elems = principal.split(",");
        for (String elem : elems) {
            String[] kv = elem.split("=");
            if (kv.length != 2)
                continue;
            if (kv[0].equals(element))
                return kv[1];
        }
        return null;
    }

    @Override
    public void initialize(boolean useSSL) throws RuntimeException {
        if (!useSSL) {
            throw new RuntimeException(this.getClass().getName() + " can only be used if SSL is enabled!");
        }
        nullAuth = new NullAuthProvider();
        nullAuth.initialize(useSSL);
    }
}

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2008 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.auth;
import java.util.List;
/**
* User credentials.
* @author bjko
*/
public class UserCredentials {

    /** Global user id; may be rewritten via {@link #setUserID(String)}. */
    protected String userID;

    /** Global group ids; the list is shared with the caller, not copied. */
    protected List<String> groupIDs;

    /** Whether the user has super-user privileges. */
    protected boolean superUser;

    /**
     * Creates a credentials object.
     *
     * @param userID    the global user id
     * @param groupIDs  the global group ids
     * @param superUser true if the user is privileged
     */
    public UserCredentials(String userID, List<String> groupIDs, boolean superUser) {
        this.userID = userID;
        this.groupIDs = groupIDs;
        this.superUser = superUser;
    }

    /** @return the global user id */
    public String getUserID() {
        return userID;
    }

    /** @param userID the new global user id */
    public void setUserID(String userID) {
        this.userID = userID;
    }

    /** @return the global group ids */
    public List<String> getGroupIDs() {
        return groupIDs;
    }

    /** @param groupIDs the new global group ids */
    public void setGroupIDs(List<String> groupIDs) {
        this.groupIDs = groupIDs;
    }

    /** @return true if the user is privileged */
    public boolean isSuperUser() {
        return superUser;
    }

    /** @param superUser the new super-user status */
    public void setSuperUser(boolean superUser) {
        this.superUser = superUser;
    }
}

View File

@@ -0,0 +1,97 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.Volume;
import org.xtreemfs.foundation.logging.Logging;
import java.io.IOException;
import java.util.concurrent.Callable;
/**
 * Abstract baseclass for the benchmark classes.
 * <p/>
 * A benchmark transfers {@code benchmarkSize} bytes in chunks of {@code requestSize}
 * bytes to/from a single volume and reports the result as a {@link BenchmarkResult}.
 *
 * @author jensvfischer
 */
abstract class AbstractBenchmark implements Callable<BenchmarkResult> {

    /* cancellation flag shared by ALL benchmark instances; set by cancel() */
    static volatile boolean cancelled = false;

    final int             requestSize;
    final long            benchmarkSize;
    final Volume          volume;
    final AdminClient     client;
    final BenchmarkConfig config;
    final VolumeManager   volumeManager;

    AbstractBenchmark(long benchmarkSize, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        this.client = client;
        this.benchmarkSize = benchmarkSize;
        this.volume = volumeManager.getNextVolume();
        this.config = config;
        this.requestSize = config.getChunkSizeInBytes();
        this.volumeManager = volumeManager;
        // reset the cancellation status; written without the misleading "this."
        // qualifier, since the flag is static and shared by all instances
        cancelled = false;
    }

    /*
     * Performs a single sequential read- or write-benchmark. Whether a read- or write-benchmark is performed depends on
     * which subclass is instantiated. This method is supposed to be called within its own thread to run a benchmark.
     */
    BenchmarkResult runBenchmark() throws Exception {
        String shortClassname = this.getClass().getName().substring(this.getClass().getName().lastIndexOf('.') + 1);
        Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Starting %s on volume %s", shortClassname, volume.getVolumeName());

        // Setting up. NOTE(review): integer division drops a remainder if
        // benchmarkSize is not a multiple of requestSize; the dropped bytes then
        // trigger the size-mismatch check below — confirm callers pass aligned sizes.
        byte[] data = new byte[requestSize];
        long numberOfRequests = benchmarkSize / requestSize;

        /* Run the AbstractBenchmark */
        long before = System.currentTimeMillis();
        long requestCounter = performIO(data, numberOfRequests);
        long after = System.currentTimeMillis();

        if (benchmarkSize != requestCounter)
            throw new BenchmarkFailedException("Data written does not equal the requested size");

        /* Calculate results */
        double timeInSec = (after - before) / 1000.;
        BenchmarkResult result = new BenchmarkResult(timeInSec, benchmarkSize, requestCounter);

        finalizeBenchmark();
        Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Finished %s", shortClassname);
        return result;
    }

    /* called before a benchmark thread is started */
    abstract void prepareBenchmark() throws Exception;

    /*
     * Writes or reads the specified amount of data to/from the volume specified in the object initialization. Called
     * within the benchmark method.
     */
    abstract long performIO(byte[] data, long numberOfBlocks) throws IOException;

    /* called at the end of every benchmark */
    abstract void finalizeBenchmark() throws Exception;

    /** Requests cancellation of all currently running benchmarks. */
    public static void cancel(){
        cancelled = true;
    }

    /** Runs the benchmark; entry point for executor-driven execution. */
    @Override
    public BenchmarkResult call() throws Exception {
        return this.runBenchmark();
    }
}

View File

@@ -0,0 +1,857 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.config.PolicyContainer;
import org.xtreemfs.common.config.ServiceConfig;
import org.xtreemfs.common.libxtreemfs.Options;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
* @author jensvfischer
*/
public class BenchmarkConfig extends ServiceConfig {
private static final Parameter[] benchmarkParameter = {
Parameter.DEBUG_LEVEL,
Parameter.DEBUG_CATEGORIES,
Parameter.DIRECTORY_SERVICE,
Parameter.USE_SSL,
Parameter.SERVICE_CREDS_FILE,
Parameter.SERVICE_CREDS_PASSPHRASE,
Parameter.SERVICE_CREDS_CONTAINER,
Parameter.TRUSTED_CERTS_FILE,
Parameter.TRUSTED_CERTS_CONTAINER,
Parameter.TRUSTED_CERTS_PASSPHRASE,
Parameter.TRUST_MANAGER,
Parameter.USE_GRID_SSL_MODE,
Parameter.ADMIN_PASSWORD,
Parameter.BASEFILE_SIZE_IN_BYTES,
Parameter.FILESIZE,
Parameter.USERNAME,
Parameter.GROUP,
Parameter.OSD_SELECTION_POLICIES,
Parameter.REPLICATION_POLICY,
Parameter.REPLICATION_FACTOR,
Parameter.CHUNK_SIZE_IN_BYTES,
Parameter.STRIPE_SIZE_IN_BYTES,
Parameter.STRIPE_SIZE_SET,
Parameter.STRIPE_WIDTH,
Parameter.STRIPE_WIDTH_SET,
Parameter.NO_CLEANUP,
Parameter.NO_CLEANUP_VOLUMES,
Parameter.NO_CLEANUP_BASEFILE,
Parameter.OSD_CLEANUP
};
private Options options;
private Map<String, String> policyAttributes;
private SSLOptions sslOptions = null;
private BenchmarkConfig(Properties props, Options options, Map<String, String> policyAttributes) {
super(props);
read();
this.options = options;
this.policyAttributes = policyAttributes;
}
private void setDefaults() {
super.setDefaults(benchmarkParameter);
}
private void read() {
for(Parameter param : benchmarkParameter) {
parameter.put(param, readParameter(param));
}
}
public static Parameter[] getBenchmarkParameter() {
return benchmarkParameter;
}
/**
* Set the stripe size on an existing {@link BenchmarkConfig} object.
*
* @param size
*/
public void setStripeSizeInBytes(Integer size) {
parameter.put(Parameter.STRIPE_SIZE_IN_BYTES, size);
}
/**
* Set the stripe size on an existing {@link BenchmarkConfig} object.
*
* @param size
*/
public void setStripeWidth(Integer size) {
parameter.put(Parameter.STRIPE_WIDTH, size);
}
/**
* Get the size of the basefile for random benchmarks. <br/>
* The basefile is a huge file to/from which the random benchmarks write/read. <br/>
* Default: 3 GiB.
* @return the size of the basefile for random benchmarks
*/
public Long getBasefileSizeInBytes(){
return (Long) parameter.get(Parameter.BASEFILE_SIZE_IN_BYTES);
}
/**
* Get the size of files in filebased benchmarks. <br/>
* Filebased benchmarks write/read a huge number of files. <br/>
* Default: 4 KiB.
* @return the size of files in filebased benchmarks
*/
public Integer getFilesize(){
return (Integer) parameter.get(Parameter.FILESIZE);
}
/**
* Get the username to be used when creating files and volumes <br/>
* Default: benchmark.
* @return the username
*/
public String getUsername(){
return (String) parameter.get(Parameter.USERNAME);
}
/**
* Get the group to be used when creating files and volumes. <br/>
* Default: benchmark.
* @return the group
*/
public String getGroup(){
return (String) parameter.get(Parameter.GROUP);
}
/**
* Get the RPC user credentials, created from the username and group infos.
* @return the RPC user credentials
*/
public RPC.UserCredentials getUserCredentials() {
return RPC.UserCredentials.newBuilder().setUsername((String) parameter.get(Parameter.USERNAME))
.addGroups((String) parameter.get(Parameter.USERNAME)).build();
}
/**
* Get the libxtreemfs {@link org.xtreemfs.common.libxtreemfs.Options}.
*
* @return the libxtreemfs {@link org.xtreemfs.common.libxtreemfs.Options}
*/
public Options getOptions(){
return options;
}
    /**
     * Get the {@link SSLOptions} for the SSL Authentication Provider,
     * lazily created on first call from the configured credential and
     * truststore files. <br/>
     * Default: null (also returned when SSL is not enabled).
     *
     * @return the {@link SSLOptions}, or null if SSL is not in use
     * @throws IOException if a credential or truststore file cannot be read
     * @throws InstantiationException if the trust manager cannot be instantiated
     * @throws IllegalAccessException if the trust manager cannot be accessed
     * @throws ClassNotFoundException if the trust manager class is not found
     */
    public SSLOptions getSslOptions() throws IOException, InstantiationException, IllegalAccessException,
            ClassNotFoundException {
        if (isUsingSSL()) {
            // lazy init: build the SSLOptions only once and cache them.
            // NOTE(review): the two FileInputStreams opened here are never
            // closed in this method — presumably SSLOptions consumes and closes
            // them; confirm, otherwise the descriptors leak.
            if (null == sslOptions) {
                sslOptions = new SSLOptions(
                        new FileInputStream(this.getServiceCredsFile()),
                        this.getServiceCredsPassphrase(),
                        this.getServiceCredsContainer(),
                        new FileInputStream(
                                this.getTrustedCertsFile()),
                        this.getTrustedCertsPassphrase(),
                        this.getTrustedCertsContainer(),
                        false,
                        this.isGRIDSSLmode(),
                        this.getSSLProtocolString(),
                        new PolicyContainer(this).getTrustManager()
                );
            }
        }
        return sslOptions;
    }
/**
* Get the OSD selection policies used when creating or opening volumes. <br/>
*
* Default: No policy is set. If an existing volume is used this means, that already set policies of the volume are
* used. If a new volume is created, the defaults are used ("1000,3002": OSD filter, Shuffling).
*
* @return the OSD selection policies
*/
public String getOsdSelectionPolicies(){
return (String) parameter.get(Parameter.OSD_SELECTION_POLICIES);
}
/**
* Get the policy attributes for OSD selection policies. <p/>
* The attributes are set when the volumes are created / opened. <br/>
* A policy attribute consists of the name of the attribute, and the value the attribute is set to. For more
* information see the XtreemFS User Guide. <br/>
*
* Attribute Format: <policy id>.<attribute name> e.g., "1002.uuids" <br/>
* Value format: <value>, e.g. "osd01"
*
* @return the policy attributes
*/
public Map<String, String> getPolicyAttributes(){
return this.policyAttributes;
}
/**
* Get the default replication policy (used when creating or opening volumes). <br/>
* As the {@code replicationFlags} in
* {@link org.xtreemfs.common.libxtreemfs.Volume#setDefaultReplicationPolicy(org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials, String, String, int, int)}
* is set to 0, this is only intended for write/read replication. <br/>
*
* Default: No policy is set. If an existing volume is used this means, that a already set policy of the volume is
* used. If a new volume is created, the defaults (no replication policy) is used.
*
* @return the replication policy
*/
public String getReplicationPolicy(){
return (String) parameter.get(Parameter.REPLICATION_POLICY);
}
/**
* Get the replication factor for the replication policy. <br/>
* Only used when explicitly setting a replication policy for a volume. <br/>
*
* Default: 3 (min for WqRq)
*
* @return the replication policy
*/
public Integer getReplicationFactor(){
return (Integer) parameter.get(Parameter.REPLICATION_FACTOR);
}
/**
* Get the chunk size for reads/writes in benchmarks. <br/>
* The chuck size is the amount of data written/ret in one piece. <br/>
* Default: 131072 bytes (128 KiB).
*
* @return the chunk size
*/
public Integer getChunkSizeInBytes(){
return (Integer) parameter.get(Parameter.CHUNK_SIZE_IN_BYTES);
}
/**
* Get the size of an OSD storage block ("blocksize") in bytes. <br/>
* Used when creating or opening volumes. <br/>
*
* When opening existing volumes, by default, the stripe size of the given volume is used. When creating a new
* volume, by default a stripe size of 131072 bytes (128 KiB) is used. <br/>
* @return the blocksize
*/
public Integer getStripeSizeInBytes(){
return (Integer) parameter.get(Parameter.STRIPE_SIZE_IN_BYTES);
}
/**
* Get the size of an OSD storage block ("blocksize") in kibibytes. <br/>
*
* @return the blocksize in KiB
*/
public Integer getStripeSizeInKiB() {
return getStripeSizeInBytes()/1024;
}
/**
* Indicates, whether a stripeSize was explicitly set via this config, or if the default values where used.
* @return true, if stripeSize was explicitly set
*/
public Boolean isStripeSizeSet(){
return (Boolean) parameter.get(Parameter.STRIPE_SIZE_SET);
}
/**
* Get the maximum number of OSDs a file is distributed to. Used when creating or opening volumes <br/>
*
* When opening existing volumes, by default, the stripe width of the given volume is used. When creating a new
* volume, by default a stripe width of 1 is used. <br/>
*
* @return the maximum number of OSDs a file is distributed to
*/
public Integer getStripeWidth(){
return (Integer) parameter.get(Parameter.STRIPE_WIDTH);
}
/**
* Indicates, whether a stripeWidth was explicitly set via this config, or if the default values where used.
* @return
*/
public Boolean isStripeWidthSet(){
return (Boolean) parameter.get(Parameter.STRIPE_WIDTH_SET);
}
/**
* Indicates whether the files and volumes created during the benchmarks will be deleted. <br/>
* Default: false.
* @return true, if created files and volumes are not to be deleted
*/
public Boolean isNoCleanup(){
return (Boolean) parameter.get(Parameter.NO_CLEANUP);
}
/**
* Indicates, whether the volumes created during the benchmarks will be deleted. <br/>
* Default: false.
* @return true, if created volumes are not to be deleted
*/
public Boolean isNoCleanupVolumes(){
return (Boolean) parameter.get(Parameter.NO_CLEANUP_VOLUMES);
}
/**
* Indicates, whether the basefile created during the random benchmarks will be deleted. <br/>
* Default: false.
* @return true, if a created basefile is not to be deleted
*/
public Boolean isNoCleanupBasefile(){
return (Boolean) parameter.get(Parameter.NO_CLEANUP_BASEFILE);
}
/**
* Indicates, whether a OSD Cleanup will be done at the end of all benchmarks. This might be needed to actually delete
* the storage blocks from the OSD after deleting volumes. <br/>
* Default: false.
* @return true, if a cleanup is to be performed
*/
public Boolean isOsdCleanup(){
return (Boolean) parameter.get(Parameter.OSD_CLEANUP);
}
/**
* Get addresses of the DIR Servers. <br/>
* Default: 127.0.0.1:32638
* @return
*/
public String[] getDirAddresses() {
InetSocketAddress[] directoryServices = getDirectoryServices();
String[] dirAddresses = new String[directoryServices.length];
for (int i =0; i<dirAddresses.length; i++)
dirAddresses[i] = directoryServices[i].getAddress().getHostAddress() + ":" + directoryServices[i].getPort();
return dirAddresses;
}
/**
*
* @return
*/
public RPC.Auth getAuth() {
RPC.Auth auth;
if (getAdminPassword().equals(""))
auth = RPCAuthentication.authNone;
else {
RPC.AuthPassword password = RPC.AuthPassword.newBuilder().setPassword(getAdminPassword()).build();
auth = RPC.Auth.newBuilder().setAuthType(RPC.AuthType.AUTH_PASSWORD).setAuthPasswd(password).build();
}
return auth;
}
/**
* Return a new builder to build a {@link BenchmarkConfig} object.
*
* @return a new builder
*/
public static ConfigBuilder newBuilder(){
return new ConfigBuilder();
}
/**
* Builder for the {@link BenchmarkConfig} datastructure.
* <p/>
*
* Use like this: <br/>
* <code>
* BenchmarkConfig.ConfigBuilder builder = BenchmarkConfig.newBuilder();<br/>
* builder.setX("x"); <br/>
* builder.setY("y"); <br/>
* BenchmarkConfig config = builder.build(); <br/>
* </code> or like this <br/>
* <code>
* BenchmarkConfig config = BenchmarkConfig.newBuilder().setX("x").setY("y").build();<br/>
* </code>
* <p/>
* The {@link Controller} and the {@link BenchmarkConfig} represent the API to the benchmark library.
*
* @author jensvfischer
*/
public static class ConfigBuilder {
private Properties props = new Properties();
private ServiceConfig parent;
private Map<String, String> policyAttributes = new HashMap<String, String>();
private Options options = new Options();
/**
* Instantiate an builder (all values are the default values, see {@link BenchmarkConfig}).
*/
private ConfigBuilder() {
}
/**
* Set the size of the basefile for random benchmarks. <br/>
* Default: 3 GiB.
*
* @param basefileSizeInBytes
* @return the builder
*/
public ConfigBuilder setBasefileSizeInBytes(long basefileSizeInBytes) {
if (basefileSizeInBytes < 1)
throw new IllegalArgumentException("basefileSizeInBytes < 1 not allowed");
props.setProperty(Parameter.BASEFILE_SIZE_IN_BYTES.getPropertyString(),
Long.toString(basefileSizeInBytes));
return this;
}
/**
* Set the size of files in filebased benchmark. <br/>
* Default: 4 KiB.
*
* @param filesize
* @return the builder
*/
public ConfigBuilder setFilesize(int filesize) {
if (filesize < 1)
throw new IllegalArgumentException("filesize < 1 not allowed");
props.setProperty(Parameter.FILESIZE.getPropertyString(), Integer.toString(filesize));
return this;
}
/**
* Set the username to be used when creating files and volumes <br/>
* Default: benchmark.
*
* @param userName
* @return the builder
*/
public ConfigBuilder setUserName(String userName) {
if (userName.isEmpty())
throw new IllegalArgumentException("Empty username not allowed");
props.setProperty(Parameter.USERNAME.getPropertyString(), userName);
return this;
}
/**
* Set the group to be used when creating files and volumes. <br/>
* Default: benchmark.
*
* @param group
* @return the builder
*/
public ConfigBuilder setGroup(String group) {
if (group.isEmpty())
throw new IllegalArgumentException("Empty group name not allowed");
props.setProperty(Parameter.USERNAME.getPropertyString(), group);
return this;
}
/**
* Set the password for accessing the osd(s) <br/>
* Default: "".
*
* @param adminPassword
* @return the builder
*/
public ConfigBuilder setAdminPassword(String adminPassword) {
props.setProperty(Parameter.ADMIN_PASSWORD.getPropertyString(), adminPassword);
return this;
}
/**
* Set the address of the DIR Server. <br/>
* Default: 127.0.0.1:32638
*
* @param dirAddress
* @return the builder
*/
public ConfigBuilder setDirAddress(String dirAddress) {
return setDirAddresses(new String[]{dirAddress});
}
        /**
         * Set the addresses of the DIR Servers. <br/>
         * Accepts addresses with an optional protocol prefix and trailing slash
         * (e.g. "pbrpc://host:port/"); each address must contain a host and a
         * port separated by ":". <br/>
         * Default: 127.0.0.1:32638
         *
         * @param dirAddresses the DIR server addresses ("host:port")
         * @return the builder
         * @throws IllegalArgumentException if an address lacks a host or port
         */
        public ConfigBuilder setDirAddresses(String[] dirAddresses) {
            int i =-1;
            for (String dirAddress : dirAddresses) {
                /* remove protocol information */
                if (dirAddress.contains("://"))
                    dirAddress = dirAddress.split("://", 2)[1];
                /* remove trailing slashes */
                if (dirAddress.endsWith("/"))
                    dirAddress = dirAddress.substring(0, dirAddress.length() - 1);
                /* split address in host and port */
                String host;
                String port;
                try {
                    host = dirAddress.split(":")[0];
                    port = dirAddress.split(":")[1];
                } catch (IndexOutOfBoundsException e) {
                    throw new IllegalArgumentException(
                            "DIR Address needs to contain a host and a port, separated by \":\" (was: \"" + dirAddress
                                    + "\").");
                }
                // The first address is stored under the plain "dir_service" keys,
                // all further ones under numbered keys.
                // NOTE(review): since i starts at -1 and is incremented AFTER the
                // put, the second address is stored as "dir_service.0.*", the
                // third as "dir_service.1.*" — confirm this matches the numbering
                // scheme ServiceConfig expects when reading the properties back.
                if (dirAddresses.length == 1 || -1 == i) {
                    props.setProperty("dir_service.host", host);
                    props.setProperty("dir_service.port", port);
                } else {
                    props.setProperty("dir_service." + i + ".host", host);
                    props.setProperty("dir_service." + i + ".port", port);
                }
                i++;
            }
            return this;
        }
/**
* Set the libxtreemfs {@link org.xtreemfs.common.libxtreemfs.Options}.
*
* @param options
* @return the builder
*/
public ConfigBuilder setOptions(Options options) {
this.options = options;
return this;
}
/**
* Set the SSL options for SSL Authetification Provider. <br/>
* Default: null.
*
* @param useSSL set to true to use SSL, false otherwise
* @param useGridSSL set to true to use GridSSL, false otherwise
* @param serviceCredsFile the pkcs12 file with the ssl user certificate
* @param serviceCredsFile the passphrase for the user certificate
* @param trustedCAsFile jks truststore with the CA
* @param trustedCAsPass passphrase for the jks truststore
*/
public ConfigBuilder setSslOptions(boolean useSSL, boolean useGridSSL, String serviceCredsFile,
String serviceCredsPass, String trustedCAsFile, String trustedCAsPass) {
props.setProperty(Parameter.USE_SSL.getPropertyString(), Boolean.toString(useSSL));
props.setProperty(Parameter.USE_GRID_SSL_MODE.getPropertyString(), Boolean.toString(useGridSSL));
props.setProperty(Parameter.SERVICE_CREDS_FILE.getPropertyString(), serviceCredsFile);
props.setProperty(Parameter.SERVICE_CREDS_PASSPHRASE.getPropertyString(), serviceCredsPass);
props.setProperty(Parameter.SERVICE_CREDS_CONTAINER.getPropertyString(), SSLOptions.PKCS12_CONTAINER);
props.setProperty(Parameter.TRUSTED_CERTS_FILE.getPropertyString(), trustedCAsFile);
props.setProperty(Parameter.TRUSTED_CERTS_CONTAINER.getPropertyString(), SSLOptions.JKS_CONTAINER);
props.setProperty(Parameter.TRUSTED_CERTS_PASSPHRASE.getPropertyString(), trustedCAsPass);
return this;
}
/**
* Set the OSD selection policies used when creating or opening volumes. <br/>
*
* Default: No policy is set. If an existing volume is used this means, that already set policies of the volume are
* used. If a new volume is created, the defaults are use ("1000,3002": OSD filter, Shuffling).
*
* @param policies
* @return the builder
*/
public ConfigBuilder setOsdSelectionPolicies(String policies) {
props.setProperty(Parameter.OSD_SELECTION_POLICIES.getPropertyString(), policies);
return this;
}
/**
* Set a policy attribute for a OSD selection policies. <p/>
* This method can be called multiple times, if multiple attributes are to be set. <br/>
* The attributes are set when the volumes are created / opened. <br/>
* A policy attribute consists of the name of the attribute, and the value the attribute is set to. For more information see the XtreemFS User Guide. <br/>
*
* Attribute Format: <policy id>.<attribute name> e.g., "1002.uuids" <br/>
* Value format: <value>, e.g. "osd01"
*
* @param attribute the attribute to be set
* @param value the value the attribute is set to
* @return the builder
*/
public ConfigBuilder setPolicyAttribute(String attribute, String value) {
this.policyAttributes.put(attribute, value);
return this;
}
/**
* Set the UUID-based filter policy (ID 1002) as OSD selection policy and set the uuids to be used by the policy
* (applied when creating/opening the volumes). It is a shortcut for setting the policy and the attributes manually. <br/>
*
* Default: see {@link #setOsdSelectionPolicies(String)}.
*
* @param uuids
* the uuids of osds to be used
* @return the builder
*/
public ConfigBuilder setSelectOsdsByUuid(String uuids) {
String key = Parameter.OSD_SELECTION_POLICIES.getPropertyString();
String osdSelectionPolicies = props.getProperty(key);
if (null == osdSelectionPolicies)
props.setProperty(key, "1002");
else
props.setProperty(key, osdSelectionPolicies+",1002");
this.policyAttributes.put("1002.uuids", uuids);
return this;
}
/**
* Set the default replication policy, used when creating or opening volumes. <br/>
* As the {@code replicationFlags} in
* {@link org.xtreemfs.common.libxtreemfs.Volume#setDefaultReplicationPolicy(org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials, String, String, int, int)}
* is set to 0, this is only intended for write/read replication. <br/>
*
* Default: No policy is set. If an existing volume is used this means, that a already set policy of the volume is
* used. If a new volume is created, the default (no replication) is used.
*
* @param policy
* @return the builder
*/
public ConfigBuilder setReplicationPolicy(String policy) {
props.setProperty(Parameter.REPLICATION_POLICY.getPropertyString(), policy);
return this;
}
/**
* Set the replication factor for the replication policy. <br/>
* Only used when explicitly setting a replication policy for a volume. <br/>
*
* Default: 3 (min for WqRq)
*
* @param replicationFactor
* @return the builder
*/
public ConfigBuilder setReplicationFactor(int replicationFactor) {
props.setProperty(Parameter.REPLICATION_FACTOR.getPropertyString(), Integer.toString(replicationFactor));
return this;
}
/**
* Set the chunk size for reads/writes in benchmarks. The chuck size is the amount of data written/ret in one piece. <br/>
*
* Default: 131072 bytes (128 KiB).
*
* @param chunkSizeInBytes the chunk size in bytes
* @return the builder
*/
public ConfigBuilder setChunkSizeInBytes(int chunkSizeInBytes) {
props.setProperty(Parameter.CHUNK_SIZE_IN_BYTES.getPropertyString(), Integer.toString(chunkSizeInBytes));
return this;
}
/**
* Set the size of an OSD storage block ("blocksize") in bytes when creating or opening volumes. <br/>
*
* When opening existing volumes, by default, the stripe size of the given volume is used. When creating a new
* volume, by default a stripe size of 131072 bytes (128 KiB) is used. <br/>
*
* @param stripeSizeInBytes
* the stripe size in bytes
* @return the builder
*/
public ConfigBuilder setStripeSizeInBytes(int stripeSizeInBytes) {
props.setProperty(Parameter.STRIPE_SIZE_IN_BYTES.getPropertyString(), Integer.toString(stripeSizeInBytes));
props.setProperty(Parameter.STRIPE_SIZE_SET.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* Set the maximum number of OSDs a file is distributed to. Used when creating or opening volumes <br/>
*
* When opening existing volumes, by default, the stripe width of the given volume is used. When creating a new
* volume, by default a stripe width of 1 is used. <br/>
* @return the builder
*/
public ConfigBuilder setStripeWidth(int stripeWidth) {
props.setProperty(Parameter.STRIPE_WIDTH.getPropertyString(), Integer.toString(stripeWidth));
props.setProperty(Parameter.STRIPE_WIDTH_SET.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* If set, the files and volumes created during the benchmarks will not be deleted. <br/>
* Default: false.
*
* @return the builder
*/
public ConfigBuilder setNoCleanup() {
props.setProperty(Parameter.NO_CLEANUP.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* If set, the volumes created during the benchmarks will not be deleted. <br/>
* Default: false.
*
* @return the builder
*/
public ConfigBuilder setNoCleanupVolumes() {
props.setProperty(Parameter.NO_CLEANUP_VOLUMES.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* If set, a basefile created during benchmarks will not be deleted. <br/>
* Default: false.
*
* @return the builder
*/
public ConfigBuilder setNoCleanupBasefile() {
props.setProperty(Parameter.NO_CLEANUP_BASEFILE.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* If set, a OSD Cleanup will be done at the end of all benchmarks. This might be needed to actually delete
* the storage blocks from the OSD after deleting volumes. <br/>
* Default: false.
*
* @return the builder
*/
public ConfigBuilder setOsdCleanup() {
props.setProperty(Parameter.OSD_CLEANUP.getPropertyString(), Boolean.toString(true));
return this;
}
/**
* If set, the {@link BenchmarkConfig} will be constructed by using as many parameters as possible from the parent
* config. <p/>
* Only parameters which weren't set at the current builder object (at the moment of the call to
* {@link org.xtreemfs.common.benchmark.BenchmarkConfig.ConfigBuilder#build()} will be taken from the parent.
*
* @param parent
* @return
*/
public ConfigBuilder setParent(ServiceConfig parent) {
this.parent = parent;
return this;
}
/**
* Build the {@link BenchmarkConfig} object.
*
* @return the build {@link BenchmarkConfig} object
* @throws Exception
*/
public BenchmarkConfig build() throws Exception {
verifyNoCleanup();
if (null != this.parent)
mergeParent();
/*
* if no DirAddress is given, either directly or in the parent config, first try the DefaultDirConfig, then
* use default
*/
if (null == props.getProperty("dir_service.host")) {
String[] dirAddresses = Controller.getDefaultDir();
if (null != dirAddresses)
setDirAddresses(dirAddresses);
else
setDirAddresses(new String[]{"127.0.0.1:32638"});
}
BenchmarkConfig config = new BenchmarkConfig(props, this.options, this.policyAttributes);
config.setDefaults();
return config;
}
private void verifyNoCleanup() {
boolean noCleanupBasefile, noCleanup, noCleanupVolumes;
noCleanupBasefile = Boolean.parseBoolean(props.getProperty(Parameter.NO_CLEANUP_BASEFILE.getPropertyString()));
noCleanup = Boolean.parseBoolean(props.getProperty(Parameter.NO_CLEANUP.getPropertyString()));
noCleanupVolumes = Boolean.parseBoolean(props.getProperty(Parameter.NO_CLEANUP_VOLUMES.getPropertyString()));
if (noCleanupBasefile && !noCleanup && !noCleanupVolumes)
throw new IllegalArgumentException("noCleanupBasefile only works with noCleanup or noCleanupVolumes");
}
/* Merge props with parent props. Current props has precedence over parent */
private void mergeParent() {
HashMap<String, String> parentParameters = parent.toHashMap();
for (Map.Entry<String, String> parentEntry : parentParameters.entrySet()) {
String parentKey = parentEntry.getKey();
String parentValue = parentEntry.getValue();
/* only set hitherto unset properties */
if (null == props.getProperty(parentKey)) {
/* Special handling for properties of type InetSocketAddress*/
Class parentClass = ServiceConfig.Parameter.getParameterFromString(parentKey).getPropertyClass();
if (parentClass == InetSocketAddress.class) {
setAddressSocketProperty(parentKey, parentValue);
} else {
props.setProperty(parentKey, parentValue);
}
}
}
}
/*
* Handles properties of type InetSocketAddress
*
* Because of the String casting on InetSocketAddresses involved in parsing the configs, one ends up with a
* string looking like "/127.0.0.1:32638" or "localhost/127.0.0.1:32638". Additionally, when handling
* InetSocketAddresses, the ServiceConfig needs as input properties in the form of property.host and
* property.port (while outputting them in only as property.host in a "[hostname]/ip:port" format).
*
* @param parentKey
*
* @param parentValue
*/
private void setAddressSocketProperty(String parentKey, String parentValue) {
/*
* Ensure the format of the casted InetSocketAddress, so the string manipulations below work. Allowed format:
* "[hostname]/ipadress:port"
*/
String pattern = "[a-z]*/[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+:[0-9]+";
if (!parentValue.matches(pattern))
throw new IllegalArgumentException("Unknown address format for DIR adress [was: " + parentValue
+ ". allowed: [hostname]/]");
/* Remove (optional) hostname part and the "/" separator. Only IP is used to reconstruct the property. */
String address = parentValue.split("/")[1];
/* split in IP and port */
String hostIP = address.split(":")[0];
String port = address.split(":")[1];
props.setProperty(parentKey, hostIP);
props.setProperty(parentKey.replace("host", "port"), port);
}
}
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
/**
 * Instantiates a benchmark depending on the BenchmarkType.
 *
 * @author jensvfischer
 */
class BenchmarkFactory {

    /**
     * Creates the benchmark implementation matching the given type.
     *
     * @param size          the number of bytes the benchmark shall transfer
     * @param benchmarkType the type of benchmark to instantiate
     * @param config        the benchmark configuration
     * @param client        the admin client used by the benchmark
     * @param volumeManager provides the volume the benchmark runs on
     * @return the benchmark instance
     * @throws IllegalArgumentException if the benchmark type is unknown
     */
    static AbstractBenchmark createBenchmark(long size, BenchmarkUtils.BenchmarkType benchmarkType, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager)
            throws Exception {
        switch (benchmarkType) {
        case SEQ_WRITE:
            return new SequentialWriteBenchmark(size, config, client, volumeManager);
        case SEQ_UNALIGNED_WRITE:
            return new UnalignedSequentialWriteBenchmark(size, config, client, volumeManager);
        case SEQ_READ:
            return new SequentialReadBenchmark(size, config, client, volumeManager);
        case RAND_WRITE:
            return new RandomWriteBenchmark(size, config, client, volumeManager);
        case RAND_READ:
            return new RandomReadBenchmark(size, config, client, volumeManager);
        case FILES_WRITE:
            return new FilebasedWriteBenchmark(size, config, client, volumeManager);
        case FILES_READ:
            return new FilebasedReadBenchmark(size, config, client, volumeManager);
        default:
            // previously returned null for an unhandled type, deferring the
            // failure to a NullPointerException at the call site; fail fast
            throw new IllegalArgumentException("Unknown benchmark type: " + benchmarkType);
        }
    }
}

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
/**
 * Exception indicating that a benchmark run did not complete successfully.
 * <p/>
 * Mirrors the four standard {@link Exception} constructors.
 *
 * @author jensvfischer
 */
public class BenchmarkFailedException extends Exception {

    /** Creates an exception with neither a detail message nor a cause. */
    public BenchmarkFailedException() {
        super();
    }

    /**
     * Creates an exception with a detail message and no cause.
     *
     * @param message the detail message, retrievable via {@link #getMessage()}
     */
    public BenchmarkFailedException(String message) {
        super(message);
    }

    /**
     * Creates an exception with a detail message and a cause. The cause's own
     * message is <i>not</i> merged into this exception's message.
     *
     * @param message the detail message, retrievable via {@link #getMessage()}
     * @param cause   the cause, retrievable via {@link #getCause()}; may be null
     *                if nonexistent or unknown
     */
    public BenchmarkFailedException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Creates an exception wrapping another throwable; the detail message
     * becomes {@code cause == null ? null : cause.toString()}.
     *
     * @param cause the cause, retrievable via {@link #getCause()}; may be null
     *              if nonexistent or unknown
     */
    public BenchmarkFailedException(Throwable cause) {
        super(cause);
    }
}

View File

@@ -0,0 +1,145 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import static org.xtreemfs.common.benchmark.BenchmarkUtils.BenchmarkType;
/**
 * Result object for benchmarks.
 * <p/>
 * A result either carries the measured values of a successful benchmark run, or — when constructed
 * from a {@link Throwable} — marks a failed run (see {@link #isFailed()}).
 *
 * @author jensvfischer
 */
public class BenchmarkResult implements Comparable<BenchmarkResult> {
    private BenchmarkType benchmarkType;
    private int numberOfReadersOrWriters;
    private double timeInSec;
    private long requestedSize;
    private long actualSize;
    private boolean failed;
    private Throwable error;

    /**
     * Constructs a result for a successful benchmark run.
     *
     * @param timeInSec duration of the run in seconds
     * @param requestedSize number of bytes the benchmark was asked to transfer
     * @param actualSize number of bytes actually transferred
     */
    public BenchmarkResult(double timeInSec, long requestedSize, long actualSize) {
        this.timeInSec = timeInSec;
        this.requestedSize = requestedSize;
        this.actualSize = actualSize;
        this.failed = false;
    }

    /**
     * Constructs a result for a successful benchmark run including type and thread count.
     *
     * @param timeInSec duration of the run in seconds
     * @param actualSize number of bytes actually transferred
     * @param numberOfReadersOrWriters number of parallel benchmark threads
     * @param benchmarkType the type of the benchmark
     */
    public BenchmarkResult(double timeInSec, long actualSize, int numberOfReadersOrWriters, BenchmarkType benchmarkType) {
        this.benchmarkType = benchmarkType;
        this.numberOfReadersOrWriters = numberOfReadersOrWriters;
        this.timeInSec = timeInSec;
        this.actualSize = actualSize;
    }

    /**
     * Constructs a result marking a failed benchmark run.
     *
     * @param error the error that caused the benchmark to fail (retrievable via {@link #getError()})
     */
    public BenchmarkResult(Throwable error) {
        this.failed = true;
        this.error = error;
    }

    void setBenchmarkType(BenchmarkType benchmarkType) {
        this.benchmarkType = benchmarkType;
    }

    void setNumberOfReadersOrWriters(int numberOfReadersOrWriters) {
        this.numberOfReadersOrWriters = numberOfReadersOrWriters;
    }

    /**
     * Get the type of the benchmark.
     * <p/>
     *
     * The benchmark type is one of the following:
     *
     * <ul>
     * <li>SEQ_WRITE: Sequential write benchmark</li>
     * <li>SEQ_READ: Sequential read benchmark</li>
     * <li>RAND_WRITE: Random write benchmark</li>
     * <li>RAND_READ: Random read benchmark</li>
     * <li>FILES_WRITE: Filebased write benchmark</li>
     * <li>FILES_READ: Filebased read benchmark</li>
     * </ul>
     *
     *
     * @return the type of the benchmark, or {@code null} for a failed result whose type was never set
     */
    public BenchmarkType getBenchmarkType() {
        return benchmarkType;
    }

    /**
     * Get the number of parallel benchmark threads.
     *
     * @return the number of parallel benchmark threads
     */
    public int getNumberOfReadersOrWriters() {
        return numberOfReadersOrWriters;
    }

    /**
     * Get the number of seconds the benchmark run took.
     *
     * @return the number of seconds the benchmark run took
     */
    public double getTimeInSec() {
        return timeInSec;
    }

    /**
     * Get the size, the benchmark was requested to write or read.
     *
     * @return the benchmark size in bytes
     */
    public long getRequestedSize() {
        return requestedSize;
    }

    /**
     * Get the count of bytes actually written or read by the benchmark (should be equal to {@link #getRequestedSize()}).
     *
     * @return the number of bytes written or read by the benchmark
     */
    public long getActualSize() {
        return actualSize;
    }

    /**
     * Get the error that caused the benchmark to fail.
     *
     * @return the error, or {@code null} if the benchmark did not fail
     */
    public Throwable getError() {
        return error;
    }

    /**
     * Returns true, if the benchmark is either a sequential write benchmark, a random write benchmark or a filebased write benchmark.
     *
     * @return true, if the benchmark is a write benchmark
     */
    public boolean isWriteBenchmark(){
        return benchmarkType == BenchmarkType.SEQ_WRITE || benchmarkType == BenchmarkType.RAND_WRITE
                || benchmarkType == BenchmarkType.FILES_WRITE;
    }

    /**
     * Returns true, if the benchmark is either a sequential read benchmark, a random read benchmark or a filebased read benchmark.
     *
     * @return true, if the benchmark is a read benchmark
     */
    public boolean isReadBenchmark(){
        return benchmarkType == BenchmarkType.SEQ_READ || benchmarkType == BenchmarkType.RAND_READ
                || benchmarkType == BenchmarkType.FILES_READ;
    }

    /**
     * Returns true if the benchmark run failed (result was constructed from a {@link Throwable}).
     *
     * @return true if the benchmark failed
     */
    public boolean isFailed() {
        return failed;
    }

    /**
     * Orders results by the string representation of their benchmark type.
     * <p/>
     * Results without a type (e.g. failed results whose type was never set) sort first; previously
     * this threw a {@link NullPointerException}. NOTE(review): this ordering is not consistent with
     * {@code equals} (which is not overridden).
     */
    @Override
    public int compareTo(BenchmarkResult otherResult) {
        String thisType = (benchmarkType == null) ? "" : benchmarkType.toString();
        String otherType = (otherResult.benchmarkType == null) ? "" : otherResult.benchmarkType.toString();
        return thisType.compareTo(otherType);
    }
}

View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
/**
 * Util class for the Benchmarks.
 * <p/>
 * Holds the size constants and the {@link BenchmarkType} enum. Not instantiable.
 *
 * @author jensvfischer
 *
 */
public class BenchmarkUtils {

    /** Number of bytes in one kibibyte (2^10). */
    public static final int KiB_IN_BYTES = 1024;
    /** Number of bytes in one mebibyte (2^20). */
    public static final int MiB_IN_BYTES = 1024 * 1024;
    /** Number of bytes in one gibibyte (2^30). */
    public static final int GiB_IN_BYTES = 1024 * 1024 * 1024;

    /** Utility class: prevent instantiation. */
    private BenchmarkUtils() {
    }

    /**
     * Enum for the different benchmark Types.
     */
    public enum BenchmarkType {
        SEQ_WRITE, SEQ_UNALIGNED_WRITE, SEQ_READ, RAND_WRITE, RAND_READ, FILES_WRITE, FILES_READ
    }
}

View File

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.Client;
import org.xtreemfs.common.libxtreemfs.ClientFactory;
import org.xtreemfs.foundation.logging.Logging;
import java.util.LinkedList;
/**
 * Handles client creation, startup and deletion centrally.
 * <p/>
 * getNewClient() can be used to get an already started client. shutdownClients() is used to shutdown all clients
 * created so far.
 *
 * @author jensvfischer
 */
class ClientManager {
    /* all clients created by this manager, kept so they can be shut down collectively */
    private final LinkedList<AdminClient> clients;
    private final BenchmarkConfig config;

    ClientManager(BenchmarkConfig config) {
        this.clients = new LinkedList<AdminClient>();
        this.config = config;
    }

    /**
     * Create and start an AdminClient. The client is registered for a later collective shutdown.
     *
     * @return a started AdminClient
     * @throws Exception if client creation or startup fails
     */
    AdminClient getNewClient() throws Exception {
        AdminClient client = ClientFactory.createAdminClient(config.getDirAddresses(), config.getUserCredentials(),
                config.getSslOptions(), config.getOptions());
        clients.add(client);
        client.start();
        return client;
    }

    /* shutdown all clients created so far */
    void shutdownClients() {
        /* log before shutting down, so the count is reported even if a shutdown blocks or fails */
        Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, ClientManager.class,
                "Shutting down %s clients", clients.size());
        for (AdminClient client : clients) {
            tryShutdownOfClient(client);
        }
    }

    /* best-effort shutdown of a single client; failures are logged, not propagated */
    private void tryShutdownOfClient(Client client) {
        try {
            client.shutdown();
        } catch (Throwable e) {
            Logging.logMessage(Logging.LEVEL_WARN, Logging.Category.tool, ClientManager.class,
                    "Error while shutting down clients");
            Logging.logError(Logging.LEVEL_WARN, ClientManager.class, e);
        }
    }
}

View File

@@ -0,0 +1,326 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR;
import org.xtreemfs.utils.DefaultDirConfig;
import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.*;
import static org.xtreemfs.common.benchmark.BenchmarkUtils.BenchmarkType;
import static org.xtreemfs.foundation.logging.Logging.LEVEL_INFO;
import static org.xtreemfs.foundation.logging.Logging.logError;
import static org.xtreemfs.foundation.logging.Logging.logMessage;
/**
 * Controller for the benchmark library.
 * <p/>
 *
 * The {@link Controller} and {@link BenchmarkConfig} represent the API to the benchmark library.
 *
 * @author jensvfischer
 *
 */
public class Controller {
    private final BenchmarkConfig config;
    private final ClientManager clientManager;
    private final VolumeManager volumeManager;
    /* lazily created on first benchmark start, resized for subsequent runs; see checkThreadPool() */
    private ThreadPoolExecutor threadPool;

    /**
     * Create a new controller object.
     *
     * @param config
     *            The parameters to be used for the benchmark.
     */
    public Controller(BenchmarkConfig config) throws Exception {
        this.config = config;
        this.clientManager = new ClientManager(config);
        this.volumeManager = new VolumeManager(config, clientManager.getNewClient());
        Thread.setDefaultUncaughtExceptionHandler(new UncaughtExceptionHandlerBenchmark(this));
    }

    /**
     * Create and open the volumes needed for the benchmarks. <br/>
     * If the volumeNames given are already existing volumes, the volumes are only opened. <br/>
     *
     * @param volumeNames
     *            the volumes to be created/opened
     * @throws Exception
     */
    public void setupVolumes(String... volumeNames) throws Exception {
        if (volumeNames.length < 1)
            throw new IllegalArgumentException("Number of volumes < 1");
        else
            volumeManager.openVolumes(volumeNames);
    }

    /**
     * Create and open default volumes for the benchmarks. <br/>
     * The volumes will be created with the options given by {@link BenchmarkConfig}.
     *
     * @param numberOfVolumes the number of volumes to be created
     * @throws Exception
     */
    public void setupDefaultVolumes(int numberOfVolumes) throws Exception {
        if (numberOfVolumes < 1)
            throw new IllegalArgumentException("Number of volumes < 1");
        else
            volumeManager.createDefaultVolumes(numberOfVolumes);
    }

    /**
     * Starts sequential write benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to write per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startSequentialWriteBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.SEQ_WRITE);
        return startBenchmark(size, numberOfThreads, BenchmarkType.SEQ_WRITE);
    }

    /**
     * Starts unaligned sequential write benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to write per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startUnalignedSequentialWriteBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.SEQ_UNALIGNED_WRITE);
        return startBenchmark(size, numberOfThreads, BenchmarkType.SEQ_UNALIGNED_WRITE);
    }

    /**
     * Starts sequential read benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to read per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startSequentialReadBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.SEQ_READ);
        return startBenchmark(size, numberOfThreads, BenchmarkType.SEQ_READ);
    }

    /**
     * Starts random write benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to write per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startRandomWriteBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.RAND_WRITE);
        return startBenchmark(size, numberOfThreads, BenchmarkType.RAND_WRITE);
    }

    /**
     * Starts random read benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to read per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startRandomReadBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.RAND_READ);
        return startBenchmark(size, numberOfThreads, BenchmarkType.RAND_READ);
    }

    /**
     * Starts filebased write benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to write per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startFilebasedWriteBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.FILES_WRITE);
        return startBenchmark(size, numberOfThreads, BenchmarkType.FILES_WRITE);
    }

    /**
     * Starts filebased read benchmarks with the parameters specified in the {@link BenchmarkConfig}. <br/>
     *
     * @param size the number of bytes to read per thread
     * @param numberOfThreads the number of parallel benchmark threads
     * @return the results of the benchmark (see {@link BenchmarkResult})
     * @throws Exception
     */
    public ArrayList<BenchmarkResult> startFilebasedReadBenchmark(long size, int numberOfThreads) throws Exception {
        verifySizesAndThreads(size, numberOfThreads, BenchmarkType.FILES_READ);
        return startBenchmark(size, numberOfThreads, BenchmarkType.FILES_READ);
    }

    /**
     * Verify that the DIR server is reachable by issuing a service query.
     *
     * @throws Exception if the connection to the DIR server cannot be established
     */
    public void tryConnection() throws Exception {
        try {
            clientManager.getNewClient().getServiceByType(DIR.ServiceType.SERVICE_TYPE_OSD);
        } catch (Exception e) {
            Logging.logMessage(Logging.LEVEL_ERROR, Logging.Category.tool, Controller.class,
                    "Failed to establish connection to DIR server.");
            throw e;
        }
    }

    /**
     * Get the DIR address from default_dir
     *
     * @return the DIR address, or null if default_dir wasn't found or couldn't be accessed.
     */
    public static String[] getDefaultDir() {
        try {
            DefaultDirConfig cfg = new DefaultDirConfig();
            return cfg.getDirectoryServices();
        } catch (IOException e) {
            logMessage(LEVEL_INFO, Logging.Category.tool, Controller.class,
                    "Could not access or find Default DIR Config in %s. Using default (localhost).",
                    DefaultDirConfig.DEFAULT_DIR_CONFIG);
            return null;
        }
    }

    /**
     * Deletes all created volumes and files and shuts down all clients. This method should be called when all
     * benchmarks are finished. The deletion of the volumes and files is regulated by the noCleanup options in
     * {@link BenchmarkConfig}.
     *
     * @throws Exception
     */
    public void teardown() throws Exception {
        deleteVolumesAndFiles();
        if (config.isOsdCleanup())
            volumeManager.cleanupOSD();
        shutdownClients();
        shutdownThreadPool();
    }

    /* validate size/thread parameters for the given benchmark type; throws IllegalArgumentException on violation */
    private void verifySizesAndThreads(long size, int threads, BenchmarkType type) {
        if ((type == BenchmarkType.SEQ_READ) || (type == BenchmarkType.SEQ_WRITE)) {
            if (size % (config.getStripeSizeInBytes() * config.getStripeWidth()) != 0)
                throw new IllegalArgumentException("size of " + type
                        + " must satisfy: size mod (stripeSize * stripeWidth) == 0");
            /* message fixed: the violated condition is size < chunksize, not the other way around */
            if (size < config.getChunkSizeInBytes())
                throw new IllegalArgumentException("size of " + type + " must satisfy: size >= chunksize");
        }
        if ((type == BenchmarkType.RAND_READ) || (type == BenchmarkType.RAND_WRITE)) {
            if (config.getBasefileSizeInBytes() < size)
                throw new IllegalArgumentException("Basefile < size of " + type);
            if (size < config.getChunkSizeInBytes())
                throw new IllegalArgumentException("size of " + type + " must satisfy: size >= chunksize");
        }
        if ((type == BenchmarkType.FILES_WRITE) || (type == BenchmarkType.FILES_READ)) {
            if (size % config.getFilesize() != 0)
                throw new IllegalArgumentException("Size of " + type + " must satisfy: size mod filesize == 0");
        }
        if (volumeManager.getVolumes().size() < threads )
            throw new IllegalArgumentException("Less volumes than parallel threads");
    }

    /*
     * Starts benchmarks in parallel. Every benchmark is started within its own thread. The method waits for all threads
     * to finish.
     */
    private ArrayList<BenchmarkResult> startBenchmark(long size, int numberOfThreads, BenchmarkType benchmarkType)
            throws Exception {
        checkThreadPool(numberOfThreads);
        CompletionService<BenchmarkResult> completionService = new ExecutorCompletionService<BenchmarkResult>(threadPool);
        ArrayList<Future<BenchmarkResult>> futures = new ArrayList<Future<BenchmarkResult>>();
        /* create and start all benchmark tasks (i.e. submit to CompletionService) */
        for (int i = 0; i < numberOfThreads ; i++) {
            AbstractBenchmark benchmark = BenchmarkFactory.createBenchmark(size, benchmarkType, config, clientManager.getNewClient(), volumeManager);
            benchmark.prepareBenchmark();
            Future<BenchmarkResult> future = completionService.submit(benchmark);
            futures.add(future);
        }
        ArrayList<BenchmarkResult> results = awaitCompletion(numberOfThreads, completionService, futures);
        /* Set BenchmarkResult type and number of threads */
        for (BenchmarkResult res : results) {
            res.setBenchmarkType(benchmarkType);
            res.setNumberOfReadersOrWriters(numberOfThreads);
        }
        /* reset VolumeManager to prepare for possible consecutive benchmarks */
        volumeManager.reset();
        return results;
    }

    /* lazily create the thread pool, or resize it to the requested number of threads */
    private void checkThreadPool(int numberOfThreads) throws InterruptedException {
        /* check if a thread pool is already instantiated */
        if (null == threadPool)
            threadPool = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, 0L, TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<Runnable>());
        /* check if thread pool has the needed number of threads, adjust if necessary */
        else if (threadPool.getPoolSize() != numberOfThreads) {
            /*
             * When growing, the maximum has to be raised first: setCorePoolSize() throws an
             * IllegalArgumentException if the new core size exceeds the current maximum pool size.
             */
            if (numberOfThreads > threadPool.getMaximumPoolSize()) {
                threadPool.setMaximumPoolSize(numberOfThreads);
                threadPool.setCorePoolSize(numberOfThreads);
            } else {
                threadPool.setCorePoolSize(numberOfThreads);
                threadPool.setMaximumPoolSize(numberOfThreads);
            }
        }
    }

    /* wait for all benchmark tasks to finish; cancels the remaining tasks if one of them fails */
    private ArrayList<BenchmarkResult> awaitCompletion(int numberOfThreads, CompletionService<BenchmarkResult> completionService, ArrayList<Future<BenchmarkResult>> futures) throws Exception {
        ArrayList<BenchmarkResult> results = new ArrayList<BenchmarkResult>(numberOfThreads);
        Exception exception = null;
        /* wait for all threads to finish */
        for (int i = 0; i < numberOfThreads; i++) {
            try {
                Future<BenchmarkResult> benchmarkResultFuture = completionService.take();
                futures.remove(benchmarkResultFuture);
                BenchmarkResult result = benchmarkResultFuture.get();
                results.add(result);
            } catch (ExecutionException e) {
                logMessage(Logging.LEVEL_ERROR, Logging.Category.tool, this, "An exception occurred within an benchmark task.");
                logError(Logging.LEVEL_ERROR, this, e.getCause());
                /* cancel all other running benchmark tasks in case of failure in one task */
                for (Future<BenchmarkResult> future : futures) {
                    future.cancel(true);
                }
                /*
                 * Future.cancel(true) works by setting the interrupted flag. In some cases the cancellation doesn't
                 * work (the query of the interrupted status returns false). Most likely the interrupted status is
                 * consumed somewhere without causing the task to stop or to reach the toplevel task code (the code in
                 * the XYBenchmark classes). This could be due to catching an InterruptedException exception or calling
                 * Thread.interrupted() (both consumes the flag status) without reestablishing the status with
                 * Thread.currentThread().interrupt(). Therefore an additional cancellation needs to be deployed...
                 */
                AbstractBenchmark.cancel();
                exception = e;
            } catch (CancellationException e) {
                // consume (planned cancellation from calling future.cancel())
                logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Benchmark task has been canceled due to an exception in another benchmark task.");
            }
        }
        if (null != exception)
            throw new BenchmarkFailedException(exception.getCause());
        return results;
    }

    /* delete all created volumes and files depending on the noCleanup options */
    void deleteVolumesAndFiles() {
        if (!config.isNoCleanup() && !config.isNoCleanupVolumes()) {
            volumeManager.deleteCreatedFiles(); // is needed in case no volume was created
            volumeManager.deleteCreatedVolumes();
        } else if (!config.isNoCleanup())
            volumeManager.deleteCreatedFiles();
    }

    /* shutdown all clients created for this controller */
    void shutdownClients() {
        clientManager.shutdownClients();
    }

    /* shutdown the benchmark thread pool, if one was created */
    void shutdownThreadPool(){
        if (threadPool != null)
            threadPool.shutdownNow();
    }
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
/**
 * Abstract baseclass for filebased benchmarks.
 * <p/>
 * A filebased benchmark writes or reads lots of small files.
 *
 * @author jensvfischer
 */
abstract class FilebasedBenchmark extends AbstractBenchmark {
    /* base name (path prefix) of the files written/read by filebased benchmarks; an index is appended per file */
    static final String BENCHMARK_FILENAME = "benchmarks/randomBenchmark/benchFile";
    /* size of each individual file in bytes, taken from the benchmark config */
    final int filesize;
    /**
     * @param size total number of bytes to be written/read by the benchmark
     * @param config benchmark parameters (file size is read from here)
     * @param client started AdminClient used by the benchmark
     * @param volumeManager manages the volumes the benchmark operates on
     */
    FilebasedBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
        this.filesize = config.getFilesize();
    }
    /** @return the base name used for the benchmark files */
    static String getBenchmarkFilename() {
        return BENCHMARK_FILENAME;
    }
}

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.io.IOException;
import java.util.Random;
/**
 * Class implementing a filebased read benchmark.
 * <p/>
 * Reads the files previously written by a {@link FilebasedWriteBenchmark} in random order.
 *
 * @author jensvfischer
 */
class FilebasedReadBenchmark extends FilebasedBenchmark {
    /* files (written by a preceding filebased write benchmark) to read from */
    private String[] filenames;

    FilebasedReadBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
    }

    @Override
    void prepareBenchmark() throws Exception {
        this.filenames = volumeManager.getRandomFilelistForVolume(volume, benchmarkSize);
    }

    /* Called within the benchmark method. Performs the actual reading of data from the volume. */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        long numberOfFilesToRead = benchmarkSize / filesize;
        int filenamesSize = filenames.length;
        long byteCounter = 0;
        Random random = new Random();
        int flags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber();
        for (long i = 0; !cancelled && i < numberOfFilesToRead; i++) {
            /* pick a random file from the file list for each read */
            String filename = filenames[random.nextInt(filenamesSize)];
            FileHandle fileHandle = volume.openFile(config.getUserCredentials(), filename, flags);
            /* close in finally: an unclosed handle leaks and triggers an AssertionError on client shutdown */
            try {
                if (filesize <= requestSize)
                    /* the former random.nextBytes(data) before this read was a copy-paste artifact from the
                     * write benchmark: the buffer is overwritten by the read and filling it only skews timings */
                    byteCounter += fileHandle.read(config.getUserCredentials(), data, filesize, 0);
                else
                    for (long j = 0; j < filesize / requestSize; j++) {
                        long nextOffset = j * requestSize;
                        assert nextOffset >= 0 : "Offset < 0 not allowed";
                        byteCounter += fileHandle.read(config.getUserCredentials(), data, requestSize, nextOffset);
                    }
            } finally {
                fileHandle.close();
            }
        }
        return byteCounter;
    }

    @Override
    void finalizeBenchmark() throws Exception {
    }
}

View File

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Random;
/**
 * Class implementing a filebased write benchmark.
 * <p/>
 * Writes many small files of {@code filesize} bytes with random content; the list of written files
 * is handed to the {@link VolumeManager} for later read benchmarks and cleanup.
 *
 * @author jensvfischer
 */
class FilebasedWriteBenchmark extends FilebasedBenchmark {
    /* names of all files written by this benchmark run */
    private LinkedList<String> filenames;

    FilebasedWriteBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
        filenames = new LinkedList<String>();
    }

    @Override
    void prepareBenchmark() throws Exception {
    }

    /* Called within the benchmark method. Performs the actual writing of data to the volume. */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        long numberOfFiles = benchmarkSize / filesize;
        long byteCounter = 0;
        Random random = new Random();
        int flags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_TRUNC.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber();
        for (long i = 0; !cancelled && i < numberOfFiles; i++) {
            FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BENCHMARK_FILENAME + i, flags, 511);
            this.filenames.add(BENCHMARK_FILENAME + i);
            /* close in finally: previously the handle leaked when a write threw an IOException */
            try {
                if (filesize <= requestSize) {
                    random.nextBytes(data);
                    byteCounter += fileHandle.write(config.getUserCredentials(), data, filesize, 0);
                } else
                    for (long j = 0; j < filesize / requestSize; j++) {
                        long nextOffset = j * requestSize;
                        assert nextOffset >= 0 : "Offset < 0 not allowed";
                        random.nextBytes(data);
                        byteCounter += fileHandle.write(config.getUserCredentials(), data, requestSize, nextOffset);
                    }
            } finally {
                fileHandle.close();
            }
        }
        return byteCounter;
    }

    @Override
    void finalizeBenchmark() throws Exception {
        volumeManager.setRandomFilelistForVolume(volume, filenames);
        volumeManager.addCreatedFiles(volume, filenames);
    }
}

View File

@@ -0,0 +1,112 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.util.LinkedList;
import java.util.Random;
/**
 * Abstract baseclass for random IO benchmarks.
 * <p/>
 * Random IO benchmarks write or read small blocks with random offsets within a large basefile.
 *
 * @author jensvfischer
 */
abstract class RandomOffsetbasedBenchmark extends AbstractBenchmark {
    /* block size of the individual random IO requests */
    final static int RANDOM_IO_BLOCKSIZE = 1024 * 4; // 4 KiB
    /* size of the basefile in bytes, taken from the benchmark config */
    final long sizeOfBasefile;
    final static String BASFILE_FILENAME = "benchmarks/basefile";

    RandomOffsetbasedBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
        sizeOfBasefile = config.getBasefileSizeInBytes();
    }

    @Override
    void prepareBenchmark() throws Exception {
        /* create file to read from if not existing */
        if (basefileDoesNotExists()) {
            createBasefile();
        }
    }

    @Override
    void finalizeBenchmark() {
    }

    /* convert the request-sized block count to the number of 4 KiB random IO blocks */
    long convertTo4KiBBlocks(long numberOfBlocks) {
        return (numberOfBlocks * (long) requestSize) / (long) RANDOM_IO_BLOCKSIZE;
    }

    /* random offset in [0, sizeOfBasefile - RANDOM_IO_BLOCKSIZE], so a full block always fits */
    long generateNextRandomOffset() {
        long nextOffset = Math.round(Math.random() * (sizeOfBasefile - (long) RANDOM_IO_BLOCKSIZE));
        assert nextOffset >= 0 : "Offset < 0. Offset: " + nextOffset + " Basefilesize: " + sizeOfBasefile;
        assert nextOffset <= (sizeOfBasefile - RANDOM_IO_BLOCKSIZE) : " Offset > Filesize. Offset: " + nextOffset
                + "Basefilesize: " + sizeOfBasefile;
        return nextOffset;
    }

    /* check if a basefile to read from exists and if it has the right size */
    boolean basefileDoesNotExists() throws Exception {
        try {
            FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BASFILE_FILENAME,
                    GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber());
            /* close in finally: previously the handle leaked if getAttr() threw */
            try {
                long fileSizeInBytes = fileHandle.getAttr(config.getUserCredentials()).getSize();
                return sizeOfBasefile != fileSizeInBytes;
            } finally {
                fileHandle.close();
            }
        } catch (PosixErrorException e) {
            /* openFile failed, i.e. the basefile is not there yet */
            Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "No basefile found.");
            return true;
        }
    }

    /* Create a large file to read from */
    private void createBasefile() throws Exception {
        Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this,
                "Start creating a basefile of size %s bytes.", sizeOfBasefile);
        long numberOfBlocks = sizeOfBasefile / (long) requestSize;
        Random random = new Random();
        int flags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_TRUNC.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber();
        FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BASFILE_FILENAME, flags, 511);
        long byteCounter = 0;
        /* close in finally: previously the handle leaked when a write threw */
        try {
            byte[] data = new byte[requestSize];
            for (long j = 0; j < numberOfBlocks; j++) {
                long nextOffset = j * requestSize;
                assert nextOffset >= 0 : "Offset < 0 not allowed";
                random.nextBytes(data);
                byteCounter += fileHandle.write(config.getUserCredentials(), data, requestSize, nextOffset);
            }
        } finally {
            fileHandle.close();
        }
        assert byteCounter == sizeOfBasefile : " Error while writing the basefile for the random io benchmark";
        addBasefileToCreatedFiles();
        Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Basefile written. Size %s Bytes.",
                byteCounter);
    }

    /* register the basefile for cleanup, unless the config asks to keep it */
    private void addBasefileToCreatedFiles() throws Exception {
        if (!config.isNoCleanupBasefile()) {
            LinkedList<String> createdFiles = new LinkedList<String>();
            createdFiles.add(BASFILE_FILENAME);
            volumeManager.addCreatedFiles(volume, createdFiles);
        }
    }
}

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.io.IOException;
/**
 * Class implementing a random IO read benchmark.
 * <p/>
 * A random IO read benchmark reads small blocks with random offsets within a large basefile.
 *
 * @author jensvfischer
 */
class RandomReadBenchmark extends RandomOffsetbasedBenchmark {
    RandomReadBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
    }

    /* Called within the benchmark method. Performs the actual reading of data from the volume. */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        numberOfBlocks = convertTo4KiBBlocks(numberOfBlocks);
        long byteCounter = 0;
        for (long j = 0; !cancelled && j < numberOfBlocks; j++) {
            FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BASFILE_FILENAME,
                    GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber());
            /* close in finally: previously the handle leaked when read() threw an IOException */
            try {
                long nextOffset = generateNextRandomOffset();
                byteCounter += fileHandle.read(config.getUserCredentials(), data, RANDOM_IO_BLOCKSIZE, nextOffset);
            } finally {
                fileHandle.close();
            }
        }
        return byteCounter;
    }
}

View File

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.io.IOException;
import java.util.Random;
/**
 * Class implementing a random IO write benchmark.
 * <p/>
 * A random IO write benchmark writes small blocks with random offsets within a large basefile.
 *
 * @author jensvfischer
 */
class RandomWriteBenchmark extends RandomOffsetbasedBenchmark {
    RandomWriteBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
    }

    /* Called within the benchmark method. Performs the actual writing of data to the volume. */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        Random random = new Random();
        numberOfBlocks = convertTo4KiBBlocks(numberOfBlocks);
        long byteCounter = 0;
        int flags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_WRONLY.getNumber();
        for (long j = 0; !cancelled && j < numberOfBlocks; j++) {
            FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BASFILE_FILENAME, flags, 511);
            /* close in finally: previously the handle leaked when write() threw an IOException */
            try {
                long nextOffset = generateNextRandomOffset();
                random.nextBytes(data);
                byteCounter += fileHandle.write(config.getUserCredentials(), data, RANDOM_IO_BLOCKSIZE, nextOffset);
            } finally {
                fileHandle.close();
            }
        }
        return byteCounter;
    }
}

View File

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
/**
 * Abstract baseclass for sequential benchmarks.
 *
 * @author jensvfischer
 */
abstract class SequentialBenchmark extends AbstractBenchmark {
    /* name (path) of the file written/read by sequential benchmarks */
    static final String BENCHMARK_FILENAME = "benchmarks/sequentialBenchmark/benchFile";
    /**
     * @param size total number of bytes to be written/read by the benchmark
     * @param config benchmark parameters
     * @param client started AdminClient used by the benchmark
     * @param volumeManager manages the volumes the benchmark operates on
     */
    SequentialBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
    }
    /* no preparation needed for sequential benchmarks by default; subclasses may override */
    @Override
    void prepareBenchmark() throws Exception {
    }
    /* no finalization needed for sequential benchmarks by default; subclasses may override */
    @Override
    void finalizeBenchmark() throws Exception {
    }
    /** @return the name of the file used by sequential benchmarks */
    static String getBenchmarkFilename() {
        return BENCHMARK_FILENAME;
    }
}

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import java.io.IOException;
/**
* Class implementing a sequential read benchmark.
*
* @author jensvfischer
*/
class SequentialReadBenchmark extends SequentialBenchmark {

    /* Files written by a previous sequential write benchmark (or inferred by the VolumeManager). */
    private String[] filenames;

    SequentialReadBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager) throws Exception {
        super(size, config, client, volumeManager);
    }

    /* Fetch the list of files to read from the volume manager. */
    @Override
    void prepareBenchmark() throws Exception {
        filenames = volumeManager.getSequentialFilelistForVolume(volume, benchmarkSize);
    }

    /*
     * Called within the benchmark method. Performs the actual reading of data from the volume and
     * returns the total number of bytes read.
     */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        int readFlags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber();
        FileHandle handle = volume.openFile(config.getUserCredentials(), filenames[0], readFlags);
        try {
            return readSequentially(data, numberOfBlocks, handle);
        } catch (IOException e) {
            /* closing the filehandle manually seems to be the only way to avoid an AssertionError in
             * VolumeImplementation.internalShutdown() when shutting down client */
            handle.close();
            throw e;
        }
    }

    /* Read numberOfBlocks request-sized blocks back-to-back, then close the handle. */
    private long readSequentially(byte[] data, long numberOfBlocks, FileHandle handle) throws IOException {
        long bytesRead = 0;
        for (long block = 0; block < numberOfBlocks && !cancelled; block++) {
            long offset = block * requestSize;
            assert offset >= 0 : "Offset < 0 not allowed";
            bytesRead += handle.read(config.getUserCredentials(), data, requestSize, offset);
        }
        handle.close();
        return bytesRead;
    }
}

View File

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Random;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
/**
* Class implementing a sequential write benchmark.
*
* @author jensvfischer
*/
class SequentialWriteBenchmark extends SequentialBenchmark {

    /* Names of the files written, handed to the VolumeManager in finalizeBenchmark(). */
    private LinkedList<String> filenames;

    SequentialWriteBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager)
            throws Exception {
        super(size, config, client, volumeManager);
        filenames = new LinkedList<String>();
    }

    /*
     * Called within the benchmark method. Performs the actual writing of data to the volume and
     * returns the total number of bytes written.
     */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        int openFlags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_TRUNC.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber();
        FileHandle handle = volume.openFile(config.getUserCredentials(), BENCHMARK_FILENAME + 0, openFlags, 511);
        try {
            return writeSequentially(data, numberOfBlocks, handle);
        } catch (IOException e) {
            /* closing the filehandle manually seems to be the only way to avoid an AssertionError in
             * VolumeImplementation.internalShutdown() when shutting down client */
            handle.close();
            throw e;
        }
    }

    /* Write numberOfBlocks request-sized blocks of random data back-to-back, then close the handle. */
    private long writeSequentially(byte[] data, long numberOfBlocks, FileHandle handle) throws IOException {
        Random random = new Random();
        this.filenames.add(BENCHMARK_FILENAME + 0);
        long bytesWritten = 0;
        for (long block = 0; block < numberOfBlocks && !cancelled; block++) {
            long offset = block * requestSize;
            assert offset >= 0 : "Offset < 0 not allowed";
            random.nextBytes(data);
            bytesWritten += handle.write(config.getUserCredentials(), data, requestSize, offset);
        }
        handle.close();
        return bytesWritten;
    }

    /* Register the written files so a following read benchmark can find them and cleanup can delete them. */
    @Override
    void finalizeBenchmark() throws Exception {
        volumeManager.setSequentialFilelistForVolume(volume, filenames);
        volumeManager.addCreatedFiles(volume, filenames);
    }
}

View File

@@ -0,0 +1,61 @@
package org.xtreemfs.common.benchmark;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Random;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.FileHandle;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
/**
 * Sequential write benchmark whose requests start on stripe boundaries even when the request size
 * is not a multiple of the stripe size, i.e. consecutive requests leave gaps ("unaligned" data).
 */
public class UnalignedSequentialWriteBenchmark extends SequentialBenchmark {

    /* Names of the files written, handed to the VolumeManager in finalizeBenchmark(). */
    private final LinkedList<String> filenames;

    UnalignedSequentialWriteBenchmark(long size, BenchmarkConfig config, AdminClient client, VolumeManager volumeManager)
            throws Exception {
        super(size, config, client, volumeManager);
        filenames = new LinkedList<String>();
    }

    /*
     * Called within the benchmark method. Performs the actual writing of data to the volume and
     * returns the total number of bytes written.
     */
    @Override
    long performIO(byte[] data, long numberOfBlocks) throws IOException {
        int flags = GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_TRUNC.getNumber()
                | GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber();
        FileHandle fileHandle = volume.openFile(config.getUserCredentials(), BENCHMARK_FILENAME + 0, flags, 0777);
        try {
            return tryPerformIO(data, numberOfBlocks, fileHandle);
        } catch (IOException e) {
            /*
             * closing the filehandle manually seems to be the only way to avoid an AssertionError in
             * VolumeImplementation.internalShutdown() when shutting down client
             */
            fileHandle.close();
            throw e;
        }
    }

    /* Write numberOfBlocks requests, each starting at the next stripe-aligned offset, then close the handle. */
    private long tryPerformIO(byte[] data, long numberOfBlocks, FileHandle fileHandle) throws IOException {
        Random random = new Random();
        this.filenames.add(BENCHMARK_FILENAME + 0);
        /* loop-invariant: number of whole stripes needed to hold one request (hoisted out of the loop) */
        long stripesPerRequest = (long) Math.ceil((double) requestSize / (double) config.getStripeSizeInBytes());
        long byteCounter = 0;
        for (long j = 0; !cancelled && j < numberOfBlocks; j++) {
            long nextOffset = j * stripesPerRequest * config.getStripeSizeInBytes();
            random.nextBytes(data);
            byteCounter += fileHandle.write(config.getUserCredentials(), data, requestSize, nextOffset);
        }
        fileHandle.close();
        return byteCounter;
    }

    /* Register the written files so a following read benchmark can find them and cleanup can delete them. */
    @Override
    void finalizeBenchmark() throws Exception {
        volumeManager.setSequentialFilelistForVolume(volume, filenames);
        volumeManager.addCreatedFiles(volume, filenames);
    }
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import org.xtreemfs.foundation.logging.Logging;
/**
* UncaughtExceptionHandler for the benchmark tool.
* <p/>
*
* Shuts down all benchmarks and cleans up in case of uncaught exceptions.
*
* @author jensvfischer
*/
class UncaughtExceptionHandlerBenchmark implements Thread.UncaughtExceptionHandler {

    /* controller used to tear down benchmarks and clients; never reassigned, so final */
    private final Controller controller;

    UncaughtExceptionHandlerBenchmark(Controller controller) {
        this.controller = controller;
    }

    /**
     * Method invoked when the given thread terminates due to the given uncaught exception.
     * <p>
     * Logs the error and shuts the benchmark tool down: deletes created volumes/files, shuts down
     * the clients and the thread pool. Any exception thrown by this method will be ignored by the
     * Java Virtual Machine.
     *
     * @param t
     *            the thread
     * @param e
     *            the exception
     */
    @Override
    public void uncaughtException(Thread t, Throwable e) {
        Logging.logMessage(Logging.LEVEL_ERROR, this,
                "An uncaught exception was thrown in %s (Thread-Id: %s). The benchmark tool will be shut down.", t.getName(), t.getId());
        Logging.logError(Logging.LEVEL_ERROR, this, e);
        controller.deleteVolumesAndFiles();
        controller.shutdownClients();
        controller.shutdownThreadPool();
    }
}

View File

@@ -0,0 +1,455 @@
/*
* Copyright (c) 2012-2013 by Jens V. Fischer, Zuse Institute Berlin
*
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.benchmark;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.libxtreemfs.AdminClient;
import org.xtreemfs.common.libxtreemfs.Volume;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.POSIXErrno;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC;
/**
* Volume Manager (Singleton).
* <p/>
*
* Class for managing volumes in the benchmark tools. Allows for the creation and deletion of volumes, manages the
* assignment of volumes to benchmark threads and does bookkeeping of created files and volumes.
* <p/>
* The bookkeeping of created volumes is necessary for the deletion (only) of created volumes and files as well as for
* the filebased benchmarks.
*
* @author jensvfischer
*/
class VolumeManager {
private static final String VOLUME_BASE_NAME = "benchmark";
private BenchmarkConfig config;
private AdminClient client;
private int currentPosition;
private LinkedList<Volume> volumes;
private LinkedList<Volume> createdVolumes;
private HashMap<Volume, HashSet<String>> createdFiles;
private HashMap<Volume, String[]> filelistsSequentialBenchmark;
private HashMap<Volume, String[]> filelistsRandomBenchmark;
VolumeManager(BenchmarkConfig config, AdminClient client) throws Exception {
this.config = config;
currentPosition = 0;
this.client = client;
this.volumes = new LinkedList<Volume>();
this.createdVolumes = new LinkedList<Volume>();
this.filelistsSequentialBenchmark = new HashMap<Volume, String[]>(5);
this.filelistsRandomBenchmark = new HashMap<Volume, String[]>(5);
this.createdFiles = new HashMap<Volume, HashSet<String>>();
}
/* cycles through the list of volumes to assign to volumes to benchmarks */
Volume getNextVolume() {
return volumes.get(currentPosition++);
}
/*
* reset the position counter of the volume list. Needs to be called if one wants to reuse the volumes for a
* consecutive benchmark
*/
void reset() {
currentPosition = 0;
}
/* used if no volumes were specified */
void createDefaultVolumes(int numberOfVolumes) throws IOException, IllegalAccessException, InstantiationException, ClassNotFoundException {
for (int i = 0; i < numberOfVolumes; i++) {
Volume volume = createAndOpenVolume(VOLUME_BASE_NAME + i);
addToVolumes(volume);
}
}
/* add a volume to the volume list */
private void addToVolumes(Volume volume) {
if (!volumes.contains(volume))
volumes.add(volume);
}
/* opens multiple volumes (and creates and opens volumes if they do not exist) */
void openVolumes(String... volumeName) throws IOException, IllegalAccessException, InstantiationException, ClassNotFoundException {
this.volumes = new LinkedList<Volume>();
for (String each : volumeName) {
volumes.add(createAndOpenVolume(each));
}
verifyVolumeSizes();
}
/* opens a single volume (or creates and opens a volume if it does not exist) */
private Volume createAndOpenVolume(String volumeName) throws IOException, IllegalAccessException, ClassNotFoundException, InstantiationException {
Volume volume = null;
try { /* try creating the volume */
List<GlobalTypes.KeyValuePair> volumeAttributes = new ArrayList<GlobalTypes.KeyValuePair>();
client.createVolume(config.getAuth(), config.getUserCredentials(), volumeName, 511, config.getUsername(),
config.getGroup(), GlobalTypes.AccessControlPolicyType.ACCESS_CONTROL_POLICY_POSIX,
GlobalTypes.StripingPolicyType.STRIPING_POLICY_RAID0, 128, 1, volumeAttributes);
volume = client.openVolume(volumeName, config.getSslOptions(), config.getOptions());
createdVolumes.add(volume);
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Created volume %s", volumeName);
} catch (PosixErrorException e) {
if (e.getPosixError() == POSIXErrno.POSIX_ERROR_EEXIST) { /* i.e. volume already exists */
volume = client.openVolume(volumeName, config.getSslOptions(), config.getOptions());
} else
throw e;
}
setStripeSizeAndWidth(volume);
createDirStructure(volume);
/* set osd selection policy */
if ( ! config.getOsdSelectionPolicies().equals(""))
volume.setOSDSelectionPolicy(config.getUserCredentials(), config.getOsdSelectionPolicies());
/* set policy attributes */
Map<String, String> attributes = config.getPolicyAttributes();
for (String attribute : attributes.keySet())
volume.setPolicyAttribute(config.getUserCredentials(), attribute, attributes.get(attribute));
if (!config.getReplicationPolicy().equals("")) {
volume.setDefaultReplicationPolicy(config.getUserCredentials(), "/", config.getReplicationPolicy(),
config.getReplicationFactor(), 0);
}
return volume;
}
private void verifyVolumeSizes() throws IOException {
int stripeSize = getVolStripeSize(this.volumes.getFirst());
int stripeWidth= getVolStripeWidth(this.volumes.getFirst());
boolean flag = false;
for (Volume volume : this.volumes){
if (stripeSize != getVolStripeSize(volume))
flag = true;
if (stripeWidth != getVolStripeWidth(volume))
flag = true;
}
if (flag)
Logging.logMessage(Logging.LEVEL_WARN, Logging.Category.tool, this,
"The stripe size and width of all volumes is not equal (it should to achieve meaningful benchmarks");
}
private void setStripeSizeAndWidth(Volume volume) throws IOException {
int size, width;
int sizeConf = config.getStripeSizeInKiB();
int widthConf = config.getStripeWidth();
int sizeVol = getVolStripeSize(volume);
int widthVol = getVolStripeWidth(volume);
if (!config.isStripeSizeSet() && !config.isStripeWidthSet()){
config.setStripeSizeInBytes(sizeVol*BenchmarkUtils.KiB_IN_BYTES);
config.setStripeWidth(widthVol);
return;
}
else {
if (!config.isStripeSizeSet() && config.isStripeWidthSet()) {
size = sizeVol;
width = widthConf;
} else if (config.isStripeSizeSet() && !config.isStripeWidthSet()) {
size = sizeConf;
width = widthVol;
} else if (config.isStripeSizeSet() && config.isStripeWidthSet()){
size = sizeConf;
width = widthConf;
} else
throw new UnknownError("Logical error. The above if-else statements should have been exhausting");
}
String val = "{\"pattern\":\"STRIPING_POLICY_RAID0\",\"width\":" + width + ",\"size\":" + size + "}";
volume.setXAttr(config.getUserCredentials(), "", "xtreemfs.default_sp", val,
MRC.XATTR_FLAGS.XATTR_FLAGS_REPLACE);
config.setStripeSizeInBytes(size*BenchmarkUtils.KiB_IN_BYTES);
config.setStripeWidth(width);
}
private int getVolStripeSize(Volume volume) throws IOException {
String valueStr = volume.getXAttr(config.getUserCredentials(), "", "xtreemfs.default_sp" );
String sizeStr = valueStr.split(",")[2];
return Integer.valueOf(sizeStr.substring(sizeStr.indexOf(":")+1, sizeStr.length()-1));
}
private int getVolStripeWidth(Volume volume) throws IOException {
String valueStr = volume.getXAttr(config.getUserCredentials(), "", "xtreemfs.default_sp" );
String widthStr = valueStr.split(",")[1];
return Integer.valueOf(widthStr.substring(widthStr.indexOf(":")+1));
}
private void createDirStructure(Volume volume) throws IOException {
createDir(volume, "/benchmarks/sequentialBenchmark");
createDir(volume, "/benchmarks/randomBenchmark");
}
private void createDir(Volume volume, String directory) throws IOException {
try { /* try to create directory benchmark */
volume.createDirectory(config.getUserCredentials(), directory, 0777, true);
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this,
"/benchmarks/randomBenchmark created on volume %s", volume.getVolumeName());
/* catch should be entered when directory already exists */
} catch (PosixErrorException e) {
/* if its not because directory already exists, throw error again */
if (!(e.getPosixError() == POSIXErrno.POSIX_ERROR_EEXIST)) {
throw e;
}
}
}
/*
* adds a filelist with files from a sequential benchmark (used to pass the list of files written by a sequential
* write benchmark to a sequential read benchmark)
*/
void setSequentialFilelistForVolume(Volume volume, LinkedList<String> filelist) {
String[] files = new String[filelist.size()];
synchronized (this) {
filelistsSequentialBenchmark.put(volume, filelist.toArray(files));
}
}
/*
* adds a filelist with files from a filebased benchmark (used to pass the list of files written by a filebased
* write benchmark to a filebased read benchmark)
*/
void setRandomFilelistForVolume(Volume volume, LinkedList<String> filelist) {
String[] files = new String[filelist.size()];
synchronized (this) {
filelistsRandomBenchmark.put(volume, filelist.toArray(files));
}
}
/*
* get the list of files written to a volume (used to pass the list of files written by a filebased write benchmark
* to a filebased read benchmark)
*/
synchronized String[] getSequentialFilelistForVolume(Volume volume, long benchmarkSizeInBytes) throws IOException {
String[] filelist;
/* null means no filelist from a previous write benchmark has been deposited */
if (null == filelistsSequentialBenchmark.get(volume)) {
filelist = inferFilelist(volume, SequentialBenchmark.getBenchmarkFilename());
if (benchmarkSizeInBytes == calculateTotalSizeOfFilelist(volume, filelist)) {
filelistsSequentialBenchmark.put(volume, filelist);
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this,
"Succesfully infered filelist on volume %s.", volume.getVolumeName());
} else {
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Infering filelist failed",
volume.getVolumeName());
throw new IllegalArgumentException("No valid files for benchmark found");
}
}
return filelistsSequentialBenchmark.get(volume);
}
/*
* get the list of files written to a volume (used to pass the list of files written by a filebased write benchmark
* to a filebased read benchmark)
*/
synchronized String[] getRandomFilelistForVolume(Volume volume, long benchmarkSizeInBytes) throws IOException {
String[] filelist;
/* null means no filelist from a previous write benchmark has been deposited */
if (null == filelistsRandomBenchmark.get(volume)) {
filelist = inferFilelist(volume, FilebasedBenchmark.getBenchmarkFilename());
if (benchmarkSizeInBytes == calculateTotalSizeOfFilelist(volume, filelist)) {
filelistsRandomBenchmark.put(volume, filelist);
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this,
"Succesfully infered filelist on volume %s.", volume.getVolumeName());
} else {
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Infering filelist failed",
volume.getVolumeName());
throw new IllegalArgumentException("No valid files for benchmark found");
}
}
return filelistsRandomBenchmark.get(volume);
}
/*
* Tries to infer a list of files from sequential benchmarks. This is called if a sequential read benchmark was
* executed without a previous write benchmark (e.g. when the benchmark tool is executed first to do write
* benchmarks with the noCleanup option, a consecutive renewed execution of a read benchmark tries to infer the
* filelist)
*/
private String[] inferFilelist(Volume volume, String pathToBasefile) throws IOException {
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Read benchmark without write benchmark. "
+ "Trying to infer a filelist on volume %s", volume.getVolumeName());
String path = pathToBasefile.substring(0, pathToBasefile.lastIndexOf('/'));
String filename = pathToBasefile.substring(pathToBasefile.lastIndexOf('/') + 1);
List<MRC.DirectoryEntry> directoryEntries = volume.readDir(config.getUserCredentials(), path, 0, 0, true)
.getEntriesList();
ArrayList<String> entries = new ArrayList<String>(directoryEntries.size());
for (MRC.DirectoryEntry directoryEntry : directoryEntries) {
String entry = directoryEntry.getName();
if (entry.matches(filename + "[0-9]+"))
entries.add(path + '/' + directoryEntry.getName());
}
entries.trimToSize();
String[] filelist = new String[entries.size()];
entries.toArray(filelist);
return filelist;
}
/* calculates the number of files on a volume */
private long calculateTotalSizeOfFilelist(Volume volume, String[] filelist) throws IOException {
long aggregatedSizeInBytes = 0;
for (String file : filelist) {
MRC.Stat stat = volume.getAttr(config.getUserCredentials(), file);
aggregatedSizeInBytes += stat.getSize();
}
return aggregatedSizeInBytes;
}
/* adds the files created by a benchmark to the list of created files */
synchronized void addCreatedFiles(Volume volume, LinkedList<String> newFiles) {
HashSet<String> filelistForVolume;
if (createdFiles.containsKey(volume))
filelistForVolume = createdFiles.get(volume);
else
filelistForVolume = new HashSet();
filelistForVolume.addAll(newFiles);
createdFiles.put(volume, filelistForVolume);
}
/* deletes all files on in the createdFiles list */
void deleteCreatedFiles() {
for (Volume volume : volumes) {
HashSet<String> fileListForVolume = createdFiles.get(volume);
if (null != fileListForVolume) {
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Deleted %s file(s) on volume %s",
fileListForVolume.size(), volume.getVolumeName());
for (String filename : fileListForVolume) {
tryToDeleteFile(volume, filename);
}
}
}
}
/* try to delete a file. log errors, but continue */
private void tryToDeleteFile(Volume volume, String filename) {
try {
volume.unlink(config.getUserCredentials(), filename);
} catch (IOException e) {
Logging.logMessage(Logging.LEVEL_ERROR, Logging.Category.tool, this,
"IO Error while trying to delete a file.");
Logging.logError(Logging.LEVEL_ERROR, Logging.Category.tool, e);
}
}
/* deletes all volumes in the list of created volumes */
void deleteCreatedVolumes() {
for (Volume volume : createdVolumes) {
deleteVolumeIfExisting(volume);
}
}
/* deletes the given volumes */
void deleteVolumes(String... volumeName) {
for (String each : volumeName) {
deleteVolumeIfExisting(each);
}
}
/* delete the default volumes */
void deleteDefaultVolumes(int numberOfVolumes) {
for (int i = 0; i < numberOfVolumes; i++) {
deleteVolumeIfExisting(VOLUME_BASE_NAME + i);
}
}
/* delete a volume specified by a volume ref */
void deleteVolumeIfExisting(Volume volume) {
volume.close();
deleteVolumeIfExisting(volume.getVolumeName());
}
/* delete a volume specified by a string with the volumes name */
void deleteVolumeIfExisting(String volumeName) {
try {
if (new ArrayList<String>(Arrays.asList(client.listVolumeNames())).contains(volumeName)) {
client.deleteVolume(config.getAuth(), config.getUserCredentials(), volumeName);
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Deleted volume %s", volumeName);
}
} catch (IOException e) {
Logging.logMessage(Logging.LEVEL_WARN, Logging.Category.tool, this, "Error while deleting volume %s", volumeName);
Logging.logError(Logging.LEVEL_WARN, this, e);
}
}
/* Performs cleanup on a OSD (because deleting the volume does not delete the files in the volume) */
void cleanupOSD() throws Exception {
String pwd = config.getAdminPassword();
LinkedList<String> uuids = getOSDUUIDs();
for (String osd : uuids) {
Logging.logMessage(Logging.LEVEL_INFO, Logging.Category.tool, this, "Starting cleanup of OSD %s", osd);
client.startCleanUp(osd, pwd, true, true, false, true, 0);
}
boolean cleanUpIsRunning = true;
while (cleanUpIsRunning) {
cleanUpIsRunning = false;
for (String osd : uuids) {
cleanUpIsRunning = cleanUpIsRunning || client.isRunningCleanUp(osd, pwd);
}
Thread.sleep(300);
}
for (String osd : uuids) {
Logging.logMessage(Logging.LEVEL_DEBUG, Logging.Category.tool, this, "Finished cleanup. Result: %s",
client.getCleanUpResult(osd, pwd));
}
}
/* get the list of all OSDs registered a the DIR */
LinkedList<String> getOSDUUIDs() throws IOException {
LinkedList<String> uuids = new LinkedList<String>();
for (DIR.Service service : client.getServiceByType(DIR.ServiceType.SERVICE_TYPE_OSD).getServicesList()) {
uuids.add(service.getUuid());
}
return uuids;
}
/* get the list of volumes */
LinkedList<Volume> getVolumes() {
return volumes;
}
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import org.xtreemfs.foundation.TimeSync;
/**
*
* @author bjko
*/
class CachedXAttr {

    private String value;     // cached xattr value
    private long   timestamp; // local system time of the last update

    CachedXAttr(String value) {
        this.value = value;
        this.timestamp = TimeSync.getLocalSystemTime();
    }

    /**
     * @return the value
     */
    public String getValue() {
        return value;
    }

    /**
     * @param value the value to set; refreshes the timestamp
     */
    public void setValue(String value) {
        this.value = value;
        this.timestamp = TimeSync.getLocalSystemTime();
    }

    /**
     * @return the timestamp
     */
    public long getTimestamp() {
        return timestamp;
    }
}

View File

@@ -0,0 +1,268 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.KeyValuePairs;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.common.uuids.UUIDResolver;
import org.xtreemfs.common.uuids.UnknownUUIDException;
import org.xtreemfs.dir.DIRClient;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.TimeSync;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.client.RPCNIOSocketClient;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Service;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceSet;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceType;
import org.xtreemfs.pbrpc.generatedinterfaces.DIRServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.AccessControlPolicyType;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.KeyValuePair;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Volumes;
import org.xtreemfs.pbrpc.generatedinterfaces.MRCServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.OSDServiceClient;
/**
*
* @author bjko
*/
public class Client {
private final RPCNIOSocketClient mdClient, osdClient;
private final InetSocketAddress[] dirAddress;
private DIRClient dirClient;
private final UUIDResolver uuidRes;
private final Map<String, Volume> volumeMap;
public Client(InetSocketAddress[] dirAddresses, int requestTimeout, int connectionTimeout, SSLOptions ssl)
throws IOException {
this.dirAddress = dirAddresses;
mdClient = new RPCNIOSocketClient(ssl, requestTimeout, connectionTimeout, "Client (dir)");
osdClient = new RPCNIOSocketClient(ssl, requestTimeout, connectionTimeout, "Client (osd)");
DIRServiceClient dirRpcClient = new DIRServiceClient(mdClient, dirAddress[0]);
dirClient = new DIRClient(dirRpcClient, dirAddress, 100, 1000 * 15);
TimeSync.initializeLocal(0);
uuidRes = UUIDResolver.startNonSingelton(dirClient, 3600, 1000);
volumeMap = new HashMap<String, Volume>();
}
public Volume getVolume(String volumeName, UserCredentials credentials) throws IOException {
try {
String lookupVolumeName = volumeName;
int snapNameIndex = volumeName.indexOf('@');
if (snapNameIndex != -1)
lookupVolumeName = volumeName.substring(0, snapNameIndex);
final ServiceSet s = dirClient.xtreemfs_service_get_by_name(null, RPCAuthentication.authNone,
RPCAuthentication.userService, lookupVolumeName);
if (s.getServicesCount() == 0) {
throw new IOException("volume '" + lookupVolumeName + "' does not exist");
}
final Service vol = s.getServices(0);
final String mrcUUIDstr = KeyValuePairs.getValue(vol.getData().getDataList(), "mrc");
final ServiceUUID mrc = new ServiceUUID(mrcUUIDstr, uuidRes);
UserCredentials uc = credentials;
if (uc == null) {
List<String> grps = new ArrayList(1);
grps.add("test");
uc = UserCredentials.newBuilder().setUsername("test").addGroups("test").build();
}
Logging.logMessage(Logging.LEVEL_DEBUG, this, "volume %s on MRC %s/%s", volumeName, mrcUUIDstr,
mrc.getAddress());
Volume v = volumeMap.get(volumeName);
if (v == null) {
v = new Volume(new OSDServiceClient(osdClient, null), new MRCServiceClient(mdClient, mrc
.getAddress()), volumeName, uuidRes, uc);
volumeMap.put(volumeName, v);
}
return v;
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
}
}
public void createVolume(String volumeName, Auth authentication, UserCredentials credentials,
StripingPolicy sp, AccessControlPolicyType accessCtrlPolicy, int permissions) throws IOException {
RPCResponse r2 = null;
try {
ServiceSet mrcs = dirClient.xtreemfs_service_get_by_type(null, RPCAuthentication.authNone, credentials,
ServiceType.SERVICE_TYPE_MRC);
if (mrcs.getServicesCount() == 0) {
throw new IOException("no MRC available for volume creation");
}
String uuid = mrcs.getServices(0).getUuid();
ServiceUUID mrcUUID = new ServiceUUID(uuid, uuidRes);
MRCServiceClient m = new MRCServiceClient(mdClient, mrcUUID.getAddress());
r2 = m.xtreemfs_mkvol(null, authentication, credentials, accessCtrlPolicy, sp, "", permissions,
volumeName, credentials.getUsername(), credentials.getGroups(0),
new LinkedList<KeyValuePair>(), 0);
r2.get();
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
} finally {
if (r2 != null)
r2.freeBuffers();
}
}
public void createVolume(String volumeName, Auth authentication, UserCredentials credentials,
StripingPolicy sp, AccessControlPolicyType accessCtrlPolicy, int permissions, String mrcUUID)
throws IOException {
RPCResponse<?> r = null;
try {
ServiceUUID uuid = new ServiceUUID(mrcUUID, uuidRes);
uuid.resolve();
MRCServiceClient m = new MRCServiceClient(mdClient, uuid.getAddress());
r = m.xtreemfs_mkvol(uuid.getAddress(), authentication, credentials, accessCtrlPolicy, sp, "", permissions,
volumeName, credentials.getUsername(), credentials.getGroups(0),
new LinkedList<KeyValuePair>(), 0);
r.get();
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
} catch (UnknownUUIDException ex) {
throw new IOException("mrc UUID was unknown", ex);
} finally {
if (r != null)
r.freeBuffers();
}
}
public void deleteVolume(String volumeName, Auth authentication, UserCredentials credentials)
throws IOException {
RPCResponse r2 = null;
assert (credentials != null);
try {
final ServiceSet s = dirClient.xtreemfs_service_get_by_name(null, RPCAuthentication.authNone, credentials,
volumeName);
if (s.getServicesCount() == 0) {
throw new IOException("volume '" + volumeName + "' does not exist");
}
final Service vol = s.getServices(0);
final String mrcUUIDstr = KeyValuePairs.getValue(vol.getData().getDataList(), "mrc");
final ServiceUUID mrc = new ServiceUUID(mrcUUIDstr, uuidRes);
MRCServiceClient m = new MRCServiceClient(mdClient, mrc.getAddress());
r2 = m.xtreemfs_rmvol(null, authentication, credentials, volumeName);
r2.get();
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
} finally {
if (r2 != null)
r2.freeBuffers();
}
}
public String[] listVolumeNames(UserCredentials credentials) throws IOException {
assert (credentials != null);
try {
final ServiceSet s = dirClient.xtreemfs_service_get_by_type(null, RPCAuthentication.authNone, credentials, ServiceType.SERVICE_TYPE_VOLUME);
String[] volNames = new String[s.getServicesCount()];
for (int i = 0; i < volNames.length; i++)
volNames[i] = s.getServices(i).getName();
return volNames;
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
}
}
public String[] listVolumeNames(String mrcUUID, UserCredentials credentials) throws IOException {
RPCResponse<Volumes> r = null;
assert (credentials != null);
try {
final ServiceUUID mrc = new ServiceUUID(mrcUUID, uuidRes);
MRCServiceClient m = new MRCServiceClient(mdClient, mrc.getAddress());
r = m.xtreemfs_lsvol(null, RPCAuthentication.authNone, credentials);
Volumes vols = r.get();
String[] volNames = new String[vols.getVolumesCount()];
for(int i = 0; i < volNames.length; i++)
volNames[i] = vols.getVolumes(i).getName();
return volNames;
} catch (InterruptedException ex) {
throw new IOException("operation was interrupted", ex);
} finally {
if (r != null)
r.freeBuffers();
}
}
    /**
     * Fetches the complete service registry from the directory service.
     *
     * @return all registered services (any service type)
     * @throws IOException if the lookup is interrupted
     */
    public ServiceSet getRegistry() throws IOException {
        try {
            return dirClient.xtreemfs_service_get_by_type(null, RPCAuthentication.authNone,
                    RPCAuthentication.userService, ServiceType.SERVICE_TYPE_MIXED);
        } catch (InterruptedException ex) {
            throw new IOException("operation was interrupted", ex);
        }
    }
    /**
     * Starts the underlying RPC clients and blocks until both have finished
     * starting up. Must be called before any other operation on this client.
     *
     * @throws Exception if either client fails to start
     */
    public void start() throws Exception {
        mdClient.start();
        mdClient.waitForStartup();
        osdClient.start();
        osdClient.waitForStartup();
    }
    /**
     * Shuts down the RPC clients and all volumes opened through this client.
     * {@code dirClient} doubles as the "running" flag and is nulled in the
     * finally block, so calling stop() more than once is a safe no-op.
     * Shutdown is best-effort: failures are printed and otherwise ignored.
     */
    public synchronized void stop() {
        if (dirClient != null) {
            try {
                mdClient.shutdown();
                osdClient.shutdown();
                mdClient.waitForShutdown();
                osdClient.waitForShutdown();
                // also shut down every volume opened through this client
                for (Volume v : volumeMap.values())
                    v.shutdown();
            } catch (Exception ex) {
                // best-effort shutdown: report and continue
                ex.printStackTrace();
            } finally {
                dirClient = null;
            }
        }
    }
@Override
public void finalize() {
stop();
}
}

View File

@@ -0,0 +1,555 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.ReplicaUpdatePolicies;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.json.JSONParser;
import org.xtreemfs.foundation.json.JSONString;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.SYSTEM_V_FCNTL;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.xtreemfs_internal_get_file_sizeResponse;
/**
*
* @author bjko
*/
public class File {
public static final String XTREEMFSSET_REPL_UPDATE_POLICY_XATTR = "xtreemfs.set_repl_update_policy";
public static final String XTREEMFS_DEFAULT_RP = "xtreemfs.default_rp";
private final Volume volume;
private final String path;
private final UserCredentials userCreds;
File(Volume volume, UserCredentials userCreds, String path) {
this.volume = volume;
this.path = path;
this.userCreds = userCreds;
}
public String getPath() {
return path;
}
/**
* check if path is a file
* @param userCreds the user's credentials
* @see java.io.File
* @return true if it is a file, false otherwise (also if path does not exist)
*/
public boolean isFile(UserCredentials userCreds) throws IOException {
Stat stat = volume.stat(path, userCreds);
if (stat != null)
return (stat.getMode() & SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_S_IFREG.getNumber()) > 0;
else
return false;
}
/**
* check if path is a file
* @see java.io.File
* @return true if it is a file, false otherwise (also if path does not exist)
*/
public boolean isFile() throws IOException {
return isFile(userCreds);
}
/**
* check if path is a directory
* @param userCreds the user's credentials
* @see java.io.File
* @return true if it is a directory, false otherwise (also if path does not exist)
*/
public boolean isDirectory(UserCredentials userCreds) throws IOException {
Stat stat = volume.stat(path, userCreds);
if (stat != null)
return (stat.getMode() & SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_S_IFDIR.getNumber()) > 0;
else
return false;
}
/**
* check if path is a directory
* @see java.io.File
* @return true if it is a directory, false otherwise (also if path does not exist)
*/
public boolean isDirectory() throws IOException {
return isDirectory(userCreds);
}
/**
* check if path exists (file or directory)
* @param userCreds the user's credentials
* @see java.io.File
* @return true if it exists, false otherwise
*/
public boolean exists(UserCredentials userCreds) throws IOException {
try {
Stat stat = volume.stat(path, userCreds);
} catch (FileNotFoundException ex) {
return false;
}
return true;
}
/**
* check if path exists (file or directory)
* @see java.io.File
* @return true if it exists, false otherwise
*/
public boolean exists() throws IOException {
return exists(userCreds);
}
public boolean canRead(UserCredentials userCreds) throws IOException {
try {
Stat stat = volume.stat(path, userCreds);
return (stat.getMode() & 0400) > 0;
} catch (FileNotFoundException ex) {
return false;
}
}
public boolean canRead() throws IOException {
return canRead(userCreds);
}
public boolean canWrite(UserCredentials userCreds) throws IOException {
try {
Stat stat = volume.stat(path, userCreds);
return (stat.getMode() & 0200) > 0;
} catch (FileNotFoundException ex) {
return false;
}
}
public boolean canWrite() throws IOException {
return canWrite(userCreds);
}
public long lastModified(UserCredentials userCreds) throws IOException {
Stat stat = volume.stat(path, userCreds);
return stat.getMtimeNs()/1000000;
}
public long lastModified() throws IOException {
return lastModified(userCreds);
}
/**
* get file size
* @param userCreds the user's credentials
* @return the files size in bytes, or 0L if it does not exist
* @throws IOException
*/
public long length(UserCredentials userCreds) throws IOException {
// if the volume is a snapshot, perform a size glimpse at the OSD
if (volume.isSnapshot()) {
RPCResponse<xtreemfs_internal_get_file_sizeResponse> fs = null;
try {
RandomAccessFile file = volume.openFile(this, SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber(), 0, userCreds);
fs = volume.osdClient.xtreemfs_internal_get_file_size(getReplica(0).getOSDAddress(0), RPCAuthentication.authNone, RPCAuthentication.userService,
file.getCredentials(), file.getFileId());
return fs.get().getFileSize();
} catch (Exception exc) {
exc.printStackTrace();
return 0;
} finally {
if (fs != null)
fs.freeBuffers();
}
}
// otherwise, fetch the file size from the MRC
else {
Stat stat = volume.stat(path, userCreds);
if (stat != null) {
return stat.getSize();
} else
return 0L;
}
}
/**
* get file size
* @return the files size in bytes, or 0L if it does not exist
* @throws IOException
*/
public long length() throws IOException {
return length(userCreds);
}
public void mkdir(int permissions, UserCredentials userCreds) throws IOException {
volume.mkdir(path, permissions, userCreds);
}
public void mkdir(int permissions) throws IOException {
mkdir(permissions, userCreds);
}
public void createFile(UserCredentials userCreds) throws IOException {
volume.touch(path, userCreds);
}
public void createFile() throws IOException {
createFile(userCreds);
}
public Stat stat(UserCredentials userCreds) throws IOException {
return volume.stat(path, userCreds);
}
public Stat stat() throws IOException {
return stat(userCreds);
}
public void renameTo(File dest, UserCredentials userCreds) throws IOException {
volume.rename(this.path,dest.path, userCreds);
}
public void renameTo(File dest) throws IOException {
renameTo(dest, userCreds);
}
public void delete(UserCredentials userCreds) throws IOException {
volume.unlink(this.path, userCreds);
}
public void delete() throws IOException {
delete(userCreds);
}
public String getxattr(String name, UserCredentials userCreds) throws IOException {
return volume.getxattr(path, name, userCreds);
}
public String getxattr(String name) throws IOException {
return getxattr(name, userCreds);
}
public String[] listXAttrs(UserCredentials userCreds) throws IOException {
return volume.listxattr(path, userCreds);
}
public String[] listXAttrs() throws IOException {
return listXAttrs(userCreds);
}
public void setxattr(String name, String value, UserCredentials userCreds) throws IOException {
volume.setxattr(path, name, value, userCreds);
}
public void setxattr(String name, String value) throws IOException {
setxattr(name, value, userCreds);
}
public void chmod(int mode, UserCredentials userCreds) throws IOException {
volume.chmod(path, mode, userCreds);
}
public void chmod(int mode) throws IOException {
chmod(mode, userCreds);
}
public void chown(String user, UserCredentials userCreds) throws IOException {
volume.chown(path, user, userCreds);
}
public void chown(String user) throws IOException {
chown(user, userCreds);
}
public void chgrp(String group, UserCredentials userCreds) throws IOException {
volume.chgrp(path, group, userCreds);
}
public void chgrp(String group) throws IOException {
chgrp(group, userCreds);
}
public void setACL(Map<String, Object> aclEntries, UserCredentials userCreds) throws IOException {
volume.setACL(path, aclEntries, userCreds);
}
public void setACL(Map<String, Object> aclEntries) throws IOException {
setACL(aclEntries, userCreds);
}
public Map<String, Object> getACL(UserCredentials userCreds) throws IOException {
return volume.getACL(path, userCreds);
}
public Map<String, Object> getACL() throws IOException {
return getACL(userCreds);
}
public RandomAccessFile open(String openMode, int permissions, UserCredentials userCreds) throws IOException {
int flags = 0;
if (openMode.contains("rw")) {
flags |= SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber();
flags |= SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber();
} else if (openMode.contains("r")) {
flags |= SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber();
}
if (openMode.contains("t")) {
flags |= SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_TRUNC.getNumber();
}
if (openMode.contains("d") || openMode.contains("s")) {
flags |= SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_SYNC.getNumber();
}
return volume.openFile(this, flags, permissions, userCreds);
}
public RandomAccessFile open(String openMode, int permissions) throws IOException {
return open(openMode, permissions, userCreds);
}
public int getNumReplicas(UserCredentials userCreds) throws IOException {
try {
Map<String,Object> xloc = getLocations(userCreds);
List<Map<String,Object>> replicas = (List<Map<String, Object>>) xloc.get("replicas");
return replicas.size();
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list: "+ex,ex);
}
}
public int getNumReplicas() throws IOException {
return getNumReplicas(userCreds);
}
public Replica getReplica(int replicaNo, UserCredentials userCreds) throws IOException {
try {
Map<String,Object> xloc = getLocations(userCreds);
List<Map<String,Object>> replicas = (List<Map<String, Object>>) xloc.get("replicas");
if (replicas.size() <= replicaNo)
throw new IllegalArgumentException("replicaNo is out of bounds");
return new Replica(this,replicas.get(replicaNo),userCreds);
} catch (JSONException ex) {
throw new IOException("cannot parse file's location list: "+ex,ex);
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list: "+ex,ex);
}
}
public Replica getReplica(int replicaNo) throws IOException {
return getReplica(replicaNo, userCreds);
}
public Replica getReplica(String osdUUID, UserCredentials userCreds) throws IOException {
Replica[] repls = getReplicas(userCreds);
for (Replica r : repls) {
for (int i = 0; i < r.getStripeWidth(); i++) {
if (r.getOSDUuid(i).equals(osdUUID))
return r;
}
}
return null;
}
public Replica getReplica(String osdUUID) throws IOException {
return getReplica(osdUUID, userCreds);
}
public Replica[] getReplicas(UserCredentials userCreds) throws IOException {
try {
Map<String,Object> xloc = getLocations(userCreds);
List<Map<String,Object>> replicas = (List<Map<String, Object>>) xloc.get("replicas");
Replica[] repls = new Replica[replicas.size()];
for (int i = 0; i < repls.length; i++)
repls[i] = new Replica(this,replicas.get(i),userCreds);
return repls;
} catch (JSONException ex) {
throw new IOException("cannot parse file's location list",ex);
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list",ex);
}
}
public Replica[] getReplicas() throws IOException {
return getReplicas(userCreds);
}
public void setDefaultReplication(String policy, int numReplicas, UserCredentials userCreds) throws IOException {
String JSON = "{ \"update-policy\" : \""+policy+"\", \"replication-factor\" : "+numReplicas+" }";
if (!isDirectory())
throw new IOException("only diretories (including root) have a default replication policy");
volume.setxattr(path, XTREEMFS_DEFAULT_RP, JSON, userCreds);
}
public void setDefaultReplication(String policy, int numReplicas) throws IOException {
setDefaultReplication(policy, numReplicas, userCreds);
}
public boolean isReadOnlyReplicated(UserCredentials userCreds) throws IOException {
try {
Map<String,Object> xloc = getLocations(userCreds);
String uPolicy = (String) xloc.get("update-policy");
return uPolicy.equals(ReplicaUpdatePolicies.REPL_UPDATE_PC_RONLY);
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list",ex);
}
}
public boolean isReadOnlyReplicated() throws IOException {
return isReadOnlyReplicated(userCreds);
}
public void setReadOnly(boolean mode, UserCredentials userCreds) throws Exception {
boolean currentMode = Boolean.valueOf(getxattr("xtreemfs.read_only"));
if (currentMode == mode)
return;
if (mode) {
//make sure the file is not open!
//open file
RandomAccessFile raf = open("r", 0, userCreds);
//fetch file sizes
long osd_file_size = raf.getFileSizeOnOSD();
long mrc_file_size = length(userCreds);
//update file size if incorrect on MRC
if (osd_file_size != mrc_file_size) {
raf.forceFileSize(osd_file_size);
}
setxattr("xtreemfs.read_only", "true");
} else {
if (getNumReplicas() > 1)
throw new IOException("File has still replicas.");
else {
// set read only
setxattr("xtreemfs.read_only", "false");
}
}
}
public void setReadOnly(boolean mode) throws Exception {
setReadOnly(mode, userCreds);
}
public boolean isReadOnly(UserCredentials userCreds) throws IOException {
return Boolean.valueOf(getxattr("xtreemfs.read_only", userCreds));
}
public boolean isReadOnly() throws IOException {
return isReadOnly(userCreds);
}
public boolean isReplicated(UserCredentials userCreds) throws IOException {
Map<String,Object> l = getLocations(userCreds);
String updatePolicy = (String)l.get("update-policy");
return !updatePolicy.equals(ReplicaUpdatePolicies.REPL_UPDATE_PC_NONE);
}
public boolean isReplicated() throws IOException {
return isReplicated(userCreds);
}
public String[] getSuitableOSDs(int numOSDs, UserCredentials userCreds) throws IOException {
List<String> osds = volume.getSuitableOSDs(this, numOSDs, userCreds);
return osds.toArray(new String[osds.size()]);
}
public String[] getSuitableOSDs(int numOSDs) throws IOException {
return getSuitableOSDs(numOSDs, userCreds);
}
public void addReplica(int width, String[] osdUuids, int flags, UserCredentials userCreds) throws IOException {
List<String> osdSet = new ArrayList(20);
for (String osd : osdUuids) {
if (osdSet.size() == width)
break;
osdSet.add(osd);
}
if (osdSet.size() != width)
throw new IllegalArgumentException("number of OSDs must be equal to width!");
volume.addReplica(this, width, osdSet, flags, userCreds);
}
public void addReplica(int width, String[] osdUuids, int flags) throws IOException {
addReplica(width, osdUuids, flags, userCreds);
}
public void setReplicaUpdatePolicy(String policy, UserCredentials userCreds) throws IOException {
volume.setxattr(this.getPath(), XTREEMFSSET_REPL_UPDATE_POLICY_XATTR, policy, userCreds);
}
public void setReplicaUpdatePolicy(String policy) throws IOException {
setReplicaUpdatePolicy(policy, userCreds);
}
public String getReplicaUpdatePolicy(UserCredentials userCreds) throws IOException {
try {
String loc = this.volume.getxattr(this.getPath(), "xtreemfs.locations", userCreds);
if ( (loc != null) && (loc.length() > 0) ) {
Map<String,Object> location = (Map<String, Object>) JSONParser.parseJSON(new JSONString(loc));
return (String) location.get("update-policy");
} else {
throw new IOException("cannot retrieve file's location list (is empty)");
}
} catch (JSONException ex) {
throw new IOException("cannot parse file's location list",ex);
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list",ex);
}
}
public String getReplicaUpdatePolicy() throws IOException {
return getReplicaUpdatePolicy(userCreds);
}
Map<String,Object> getLocations(UserCredentials userCreds) throws IOException {
try {
String loc = this.volume.getxattr(this.getPath(), "xtreemfs.locations", userCreds);
if ( (loc != null) && (loc.length() > 0) ) {
return (Map<String, Object>) JSONParser.parseJSON(new JSONString(loc));
} else {
throw new IOException("cannot retrieve file's location list (is empty)");
}
} catch (JSONException ex) {
throw new IOException("cannot parse file's location list",ex);
} catch (ClassCastException ex) {
throw new IOException("cannot parse file's location list",ex);
}
}
void removeReplica(String headOSDuuid, UserCredentials userCreds) throws IOException {
if (!this.isFile())
throw new IOException("cannot remove replica from a non-file object");
volume.removeReplica(this, headOSDuuid, userCreds);
}
}

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.IOException;
/**
*
* @author bjko
*/
/**
 * Thrown when an OSD reports that a stored object's checksum does not match
 * its data, i.e. the object is corrupted on disk.
 *
 * @author bjko
 */
public class InvalidChecksumException extends IOException {

    // exceptions are serializable; pin the serialized form explicitly
    private static final long serialVersionUID = 1L;

    /**
     * @param message description of the corrupted object
     */
    public InvalidChecksumException(String message) {
        super(message);
    }
}

View File

@@ -0,0 +1,716 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.xtreemfs.common.clients.internal.ObjectMapper;
import org.xtreemfs.common.clients.internal.ObjectMapper.ObjectRequest;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.common.uuids.UnknownUUIDException;
import org.xtreemfs.common.xloc.Replica;
import org.xtreemfs.common.xloc.StripingPolicyImpl;
import org.xtreemfs.foundation.buffer.BufferPool;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.client.PBRPCException;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.client.RPCResponseAvailableListener;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.ErrorType;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.osd.InternalObjectData;
import org.xtreemfs.osd.replication.ObjectSet;
import org.xtreemfs.pbrpc.generatedinterfaces.OSDServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.FileCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.REPL_FLAG;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XLocSet;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.ObjectData;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.ObjectList;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.xtreemfs_internal_get_file_sizeResponse;
/**
*
* @author bjko
*/
public class RandomAccessFile {
public static final int WAIT_MS_BETWEEN_WRITE_SWITCHOVER = 1000;
private final File parent;
private final OSDServiceClient osdClient;
private final Volume parentVolume;
private final boolean readOnly;
private final boolean syncMetadata;
private long position;
private FileCredentials credentials;
private Replica currentReplica;
private int currentReplicaNo;
private final int numReplicas;
private final String fileId;
private ObjectMapper oMapper;
private boolean closed;
private UserCredentials userCreds;
private final AtomicReference<XCap> uptodateCap;
RandomAccessFile(File parent, Volume parentVolume, OSDServiceClient client, FileCredentials fc, boolean readOnly, boolean syncMetadata, UserCredentials userCreds) {
this.parent = parent;
this.parentVolume = parentVolume;
this.osdClient = client;
this.credentials = fc;
this.readOnly = readOnly;
this.syncMetadata = syncMetadata;
this.userCreds = userCreds;
position = 0;
currentReplicaNo = 0;
closed = false;
numReplicas = credentials.getXlocs().getReplicasCount();
fileId = fc.getXcap().getFileId();
uptodateCap = new AtomicReference(fc.getXcap());
selectReplica(0);
}
    /**
     * Installs a renewed capability; thread-safe via the AtomicReference.
     *
     * @param newXCap the renewed capability
     */
    public void updateCap(XCap newXCap) {
        uptodateCap.set(newXCap);
    }
    /**
     * Copies the most recent capability into the credentials object sent
     * with subsequent OSD requests.
     */
    protected void setXCap() {
        credentials = credentials.toBuilder().setXcap(uptodateCap.get()).build();
    }
    /**
     * Fails over to the next replica in the location list (round-robin).
     */
    protected void switchToNextReplica() {
        selectReplica((currentReplicaNo + 1) % numReplicas);
    }
    /**
     * Makes the given replica current and installs the object mapper that
     * matches its striping policy.
     *
     * @param replicaNo index of the replica to select (assumed valid)
     */
    protected void selectReplica(int replicaNo) {
        currentReplicaNo = replicaNo;
        currentReplica = new Replica(credentials.getXlocs().getReplicas(currentReplicaNo), null);
        // the mapper translates byte ranges into per-OSD object requests
        oMapper = ObjectMapper.getMapper(currentReplica.getStripingPolicy().getPolicy());
        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, this,"now using replica %d (%s)",replicaNo,credentials.getXlocs().getReplicasList());
    }
    /** @return the file's current X-Locations (replica) list */
    public XLocSet getLocationsList() {
        return credentials.getXlocs();
    }
    /** @return the stripe size (bytes) of the current replica, from object 0 */
    public int getCurrentReplicaStripeSize() {
        return currentReplica.getStripingPolicy().getStripeSizeForObject(0);
    }
    /**
     * @return the striping width (number of OSDs) of the current replica;
     *         the method name keeps its historical spelling for API
     *         compatibility
     */
    public int getCurrentReplicaStripeingWidth() {
        return currentReplica.getStripingPolicy().getWidth();
    }
public void forceReplica(int replicaNo) {
if ((replicaNo > numReplicas-1) || (replicaNo < 0))
throw new IllegalArgumentException("invalid replica number");
selectReplica(replicaNo);
}
    /**
     * Forces all subsequent I/O onto the replica whose head (first) OSD
     * has the given UUID.
     *
     * @param headOSDuuid UUID of the head OSD of the wanted replica
     * @throws IllegalArgumentException if no replica is headed by that OSD
     */
    public void forceReplica(String headOSDuuid) {
        final int numRepl = credentials.getXlocs().getReplicasCount();
        for (int i = 0; i < numRepl; i++) {
            org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica r = credentials.getXlocs().getReplicas(i);
            // only the head OSD (index 0) of each replica is compared
            if (r.getOsdUuids(0).equals(headOSDuuid)) {
                selectReplica(i);
                return;
            }
        }
        throw new IllegalArgumentException("osd "+headOSDuuid+" not in any of the replicas: "+credentials.getXlocs().getReplicasList());
    }
    /** @return the index of the replica currently used for I/O */
    public int getCurrentReplica() {
        return currentReplicaNo;
    }
    /** @return the file's global XtreemFS file ID, taken from the capability */
    public String getFileId() {
        return credentials.getXcap().getFileId();
    }
    /** @return the File object this handle was opened from */
    public File getFile() {
        return parent;
    }
    /** @return true if this handle was opened read-only */
    public boolean isReadOnly() {
        return this.readOnly;
    }
    /**
     * Reads up to {@code length} bytes into {@code data} starting at
     * {@code offset}, from the current file pointer.
     *
     * @return the number of bytes actually read
     */
    public int read(byte[] data, int offset, int length) throws IOException {
        ReusableBuffer buf = ReusableBuffer.wrap(data, offset, length);
        return read(buf);
    }
    /**
     * Reads from the current file pointer until {@code data} is full or EOF.
     * The byte range is mapped to per-OSD object requests which are issued in
     * parallel; on communication failure or redirect the read is retried on
     * the next replica, up to numReplicas + maxRetries attempts.
     *
     * @param data buffer to fill; the file pointer advances by the number of
     *            bytes read
     * @return the number of bytes read (0 if nothing was requested)
     * @throws IOException if all replicas fail or the operation is interrupted
     */
    public int read(ReusableBuffer data) throws IOException {
        if (closed) {
            throw new IllegalStateException("file was closed");
        }
        int numTries = 0;
        IOException cause = null;
        do {
            // map the requested byte range onto objects of the current replica
            List<ObjectRequest> ors = oMapper.readRequest(data.remaining(), position, currentReplica);
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"read from file %s: %d bytes from offset %d (= %d obj rqs)",fileId,data.remaining(),position,ors.size());
            if (ors.isEmpty()) {
                return 0;
            }
            final AtomicInteger responseCnt = new AtomicInteger(ors.size());
            RPCResponse<ObjectData>[] resps = new RPCResponse[ors.size()];
            try {
                // listener wakes us up once the last of the parallel
                // object reads has completed
                final RPCResponseAvailableListener rl = new RPCResponseAvailableListener<ObjectData>() {
                    @Override
                    public void responseAvailable(RPCResponse<ObjectData> r) {
                        if (responseCnt.decrementAndGet() == 0) {
                            //last response
                            synchronized (responseCnt) {
                                responseCnt.notify();
                            }
                        }
                    }
                };
                // refresh the capability before sending the requests
                setXCap();
                for (int i = 0; i < ors.size(); i++) {
                    ObjectRequest or = ors.get(i);
                    ServiceUUID osd = new ServiceUUID(or.getOsdUUID(), parentVolume.uuidResolver);
                    RPCResponse<ObjectData> r = osdClient.read(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
                        credentials, fileId, or.getObjNo(), -1, or.getOffset(), or.getLength());
                    resps[i] = r;
                    r.registerListener(rl);
                }
                // wait until all object reads have returned
                synchronized (responseCnt) {
                    if (responseCnt.get() > 0) {
                        responseCnt.wait();
                    }
                }
                //assemble responses
                InternalObjectData[] ods = new InternalObjectData[ors.size()];
                for (int i = 0; i < ors.size(); i++) {
                    ods[i] = new InternalObjectData(resps[i].get(),resps[i].getData());
                }
                int numBytesRead = 0;
                // copy payload into the caller's buffer; zero padding
                // represents sparse regions / gaps reported by the OSD
                for (InternalObjectData od : ods) {
                    if (od.getData() != null) {
                        numBytesRead += od.getData().remaining();
                        data.put(od.getData());
                        BufferPool.free(od.getData());
                    }
                    if (od.getZero_padding() > 0) {
                        numBytesRead += od.getZero_padding();
                        for (int i = 0; i < od.getZero_padding(); i++)
                            data.put((byte)0);
                    }
                }
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"read returned %d bytes",numBytesRead);
                position += numBytesRead;
                return numBytesRead;
            } catch (InterruptedException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                cause = new IOException("operation aborted", ex);
            } catch (PBRPCException ex) {
                if (ex.getErrorType() == ErrorType.REDIRECT) {
                    // OSD redirect: switch replica and retry immediately
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, this,"redirected to: %s",ex.getRedirectToServerUUID());
                    forceReplica(ex.getRedirectToServerUUID());
                    continue;
                } else if (ex.getErrorType() == ErrorType.ERRNO) {
                    cause = ex;
                } else {
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                    cause = new IOException("communication failure", ex);
                }
            } catch (IOException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                cause = ex;
            } catch (Throwable th) {
                th.printStackTrace();
                throw new IOException("nasty!");
            } finally {
                for (RPCResponse r : resps)
                    r.freeBuffers();
            }
            numTries++;
            // back off briefly before failing over to the next replica
            try {
                Thread.sleep(WAIT_MS_BETWEEN_WRITE_SWITCHOVER);
            } catch (InterruptedException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                throw new IOException("operation aborted", ex);
            }
            switchToNextReplica();
        } while (numTries < numReplicas+parentVolume.getMaxRetries());
        throw cause;
    }
    /**
     * Verifies a single object's checksum on the current replica's OSD.
     *
     * @param objectNo the object to check
     * @return the object's zero-padding as reported by the OSD
     * @throws InvalidChecksumException if the OSD reports a checksum mismatch
     * @throws IOException on communication failure or interruption
     */
    public int checkObject(long objectNo) throws IOException {
        if (closed) {
            throw new IllegalStateException("file was closed");
        }
        RPCResponse<ObjectData> r = null;
        try {
            // resolve the OSD responsible for this object in the current replica
            ServiceUUID osd = new ServiceUUID(currentReplica.getOSDForObject(objectNo).toString(), parentVolume.uuidResolver);
            setXCap();
            r = osdClient.xtreemfs_check_object(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
                credentials, fileId, objectNo, 0l);
            ObjectData od = r.get();
            if (od.getInvalidChecksumOnOsd()) {
                // try next replica
                throw new InvalidChecksumException("object " + objectNo + " has an invalid checksum");
            }
            return od.getZeroPadding();
        } catch (InterruptedException ex) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("operation aborted", ex);
        } catch (PBRPCException ex) {
            // ERRNO errors are passed through unchanged to the caller
            if (ex.getErrorType() == ErrorType.ERRNO)
                throw ex;
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("communication failure", ex);
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
    /**
     * Writes {@code length} bytes from {@code data}, starting at
     * {@code offset}, to this file at the current file pointer.
     *
     * @param data buffer containing the bytes to write
     * @param offset position in {@code data} at which to start
     * @param length number of bytes to write
     * @return the number of bytes written
     * @throws IOException if the write fails on all replicas
     */
    public int write(byte[] data, int offset, int length) throws IOException {
        ReusableBuffer buf = ReusableBuffer.wrap(data, offset, length);
        return write(buf);
    }
    /**
     * Writes the buffer's remaining bytes at the current file pointer. The
     * range is mapped to per-OSD object writes issued in parallel; on failure
     * or redirect the write is retried on the next replica, up to
     * numReplicas + maxRetries attempts. The resulting file-size update is
     * stored locally and, with syncMetadata, pushed to the MRC immediately.
     *
     * @param data buffer to write; the file pointer advances by the number of
     *            bytes written
     * @return the number of bytes written
     * @throws IOException if the handle is read-only or all replicas fail
     */
    public int write(ReusableBuffer data) throws IOException {
        if (readOnly) {
            throw new IOException("File is marked as read-only.");
        }
        if (closed) {
            throw new IllegalStateException("file was closed");
        }
        if (data.remaining() == 0) {
            return 0;
        }
        // map the byte range onto objects of the current replica
        List<ObjectRequest> ors = oMapper.writeRequest(data, position, currentReplica);
        if (ors.isEmpty()) {
            return 0;
        }
        int numTries = 0;
        IOException cause = null;
        do {
            final AtomicInteger responseCnt = new AtomicInteger(ors.size());
            RPCResponse[] resps = new RPCResponse[ors.size()];
            try {
                // listener wakes us up once the last parallel write completed
                final RPCResponseAvailableListener rl = new RPCResponseAvailableListener<OSDWriteResponse>() {
                    @Override
                    public void responseAvailable(RPCResponse<OSDWriteResponse> r) {
                        if (responseCnt.decrementAndGet() == 0) {
                            //last response
                            synchronized (responseCnt) {
                                responseCnt.notify();
                            }
                        }
                    }
                };
                int bytesWritten = 0;
                // refresh the capability before sending the requests
                setXCap();
                ObjectData objData = ObjectData.newBuilder().setChecksum(0).setInvalidChecksumOnOsd(false).setZeroPadding(0).build();
                for (int i = 0; i < ors.size(); i++) {
                    ObjectRequest or = ors.get(i);
                    or.getData().position(0);
                    bytesWritten += or.getData().capacity();
                    ServiceUUID osd = new ServiceUUID(or.getOsdUUID(), parentVolume.uuidResolver);
                    RPCResponse<OSDWriteResponse> r = osdClient.write(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
                        credentials, fileId, or.getObjNo(), -1, or.getOffset(), 0l,
                        objData, or.getData());
                    resps[i] = r;
                    r.registerListener(rl);
                }
                // wait until all object writes have returned
                synchronized (responseCnt) {
                    if (responseCnt.get() > 0) {
                        responseCnt.wait();
                    }
                }
                //assemble responses
                OSDWriteResponse owr = null;
                for (int i = 0; i < ors.size(); i++) {
                    owr = (OSDWriteResponse) resps[i].get();
                }
                setXCap();
                // record the new file size; push to the MRC if synchronous
                // metadata mode was requested at open time
                parentVolume.storeFileSizeUpdate(fileId, owr, userCreds);
                if (syncMetadata) {
                    parentVolume.pushFileSizeUpdate(fileId, userCreds);
                }
                data.flip();
                position += bytesWritten;
                return bytesWritten;
            } catch (InterruptedException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                cause = new IOException("operation aborted", ex);
            } catch (PBRPCException ex) {
                if (ex.getErrorType() == ErrorType.REDIRECT) {
                    // OSD redirect: switch replica and retry immediately
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, this,"redirected to: %s",ex.getRedirectToServerUUID());
                    forceReplica(ex.getRedirectToServerUUID());
                    continue;
                } else if (ex.getErrorType() == ErrorType.ERRNO) {
                    cause = ex;
                } else {
                    if (Logging.isDebug())
                        Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                    cause = new IOException("communication failure", ex);
                }
            } catch (IOException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                cause = ex;
            } finally {
                for (RPCResponse r : resps) {
                    if (r != null)
                        r.freeBuffers();
                }
            }
            numTries++;
            switchToNextReplica();
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"write failed (%s), switched to replica: %s",cause,currentReplica);
            // back off briefly before retrying on the new replica
            try {
                Thread.sleep(WAIT_MS_BETWEEN_WRITE_SWITCHOVER);
            } catch (InterruptedException ex) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                throw new IOException("operation aborted", ex);
            }
        } while (numTries < numReplicas+parentVolume.getMaxRetries());
        throw cause;
    }
    /**
     * Moves the file pointer; no bounds check is performed.
     *
     * @param position new absolute position in bytes
     */
    public void seek(long position) {
        this.position = position;
    }
    /** @return the current file pointer (absolute byte offset) */
    public long getFilePointer() {
        return position;
    }
    /**
     * Closes this handle; subsequent reads/writes throw IllegalStateException.
     */
    public void close() throws IOException {
        this.closed = true;
        parentVolume.closeFile(this, fileId, readOnly, userCreds);
    }
    /**
     * Pushes any locally buffered file-size update to the MRC.
     */
    public void fsync() throws IOException {
        parentVolume.pushFileSizeUpdate(fileId, userCreds);
    }
    /** @return the file's size in bytes, as reported by the parent File */
    public long length() throws IOException {
        return parent.length();
    }
    /**
     * @return the number of objects the file currently spans under the
     *         current replica's striping policy (0 for an empty file)
     */
    public long getNumObjects() throws IOException {
        long flength = length();
        if (flength > 0)
            // object index of the last byte, plus one
            return currentReplica.getStripingPolicy().getObjectNoForOffset(flength-1)+1;
        else
            return 0;
    }
public void forceFileSize(long newFileSize) throws IOException {
XCap truncCap = parentVolume.truncateFile(fileId, userCreds);
try {
parentVolume.storeFileSizeUpdate(fileId, OSDWriteResponse.newBuilder()
.setSizeInBytes(newFileSize).setTruncateEpoch(truncCap.getTruncateEpoch()).build(),
userCreds);
parentVolume.pushFileSizeUpdate(fileId, userCreds);
if (position > newFileSize)
position = newFileSize;
} catch (PBRPCException ex) {
if (Logging.isDebug())
Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
throw new IOException("communication failure", ex);
}
}
    /**
     * Fetches the current file size directly from the head OSD of the current
     * replica (bypassing the MRC). Follows OSD redirects by switching the
     * replica and retrying recursively.
     * @return the file size in bytes as known by the OSD
     * @throws IllegalStateException if the file was already closed
     * @throws IOException on communication failure or abort
     */
    long getFileSizeOnOSD() throws IOException {
        if (closed) {
            throw new IllegalStateException("file was closed");
        }
        RPCResponse<xtreemfs_internal_get_file_sizeResponse> r = null;
        try {
            ServiceUUID osd = new ServiceUUID(currentReplica.getHeadOsd().toString(), parentVolume.uuidResolver);
            setXCap();
            r = osdClient.xtreemfs_internal_get_file_size(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService, credentials, fileId);
            return r.get().getFileSize();
        } catch (InterruptedException ex) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("operation aborted", ex);
        } catch (PBRPCException ex) {
            if (ex.getErrorType() == ErrorType.REDIRECT) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"redirected to: %s",ex.getRedirectToServerUUID());
                // NOTE(review): recursion is unbounded if OSDs keep redirecting — confirm
                forceReplica(ex.getRedirectToServerUUID());
                return getFileSizeOnOSD();
            } else if (ex.getErrorType() == ErrorType.ERRNO) {
                // POSIX-style error: propagate unchanged to the caller
                throw ex;
            } else {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                throw new IOException("communication failure", ex);
            }
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
    /**
     * Checks whether replica replicaNo holds all objects of this file.
     * Short-circuits if the replica is already flagged complete; otherwise
     * fetches the object set from every OSD of the replica and verifies that
     * all expected objects are present. On success, best-effort marks the
     * replica as complete via the "xtreemfs.mark_replica_complete" xattr.
     * Follows OSD redirects by switching the replica and retrying.
     * @param replicaNo index of the replica in the file's X-Locations list
     * @throws IllegalStateException if the file was already closed
     * @throws IOException on communication failure
     */
    boolean isCompleteReplica(int replicaNo) throws IOException {
        if (closed) {
            throw new IllegalStateException("file was closed");
        }
        RPCResponse<ObjectList> r = null;
        try {
            org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica replica = this.credentials.getXlocs().getReplicas(replicaNo);
            if ((replica.getReplicationFlags() & REPL_FLAG.REPL_FLAG_IS_COMPLETE.getNumber()) > 0) {
                return true;
            }
            setXCap();
            StripingPolicyImpl sp = StripingPolicyImpl.getPolicy(replica, 0);
            long lastObjectNo = sp.getObjectNoForOffset(this.credentials.getXlocs().getReadOnlyFileSize() - 1);
            // NOTE(review): osdRelPos is never advanced inside the loop below, so
            // every OSD is scanned starting at object 0 with stride getWidth() —
            // for a striped replica each OSD would presumably start at its own
            // relative position. Confirm before relying on this for width > 1.
            int osdRelPos = 0;
            for (String osdUUID : replica.getOsdUuidsList()) {
                ServiceUUID osd = new ServiceUUID(osdUUID, parentVolume.uuidResolver);
                r = osdClient.xtreemfs_internal_get_object_set(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService, credentials, fileId);
                ObjectList ol = r.get();
                r.freeBuffers();
                r = null;
                byte[] serializedBitSet = ol.getSet().toByteArray();
                ObjectSet oset = null;
                try {
                    // NOTE(review): passing replicaNo for both leading ObjectSet ctor
                    // arguments looks suspicious (stripe width / start position would
                    // be expected) — confirm against the ObjectSet constructor.
                    oset = new ObjectSet(replicaNo, replicaNo, serializedBitSet);
                } catch (Exception ex) {
                    throw new IOException("cannot deserialize object set: "+ex,ex);
                }
                for (long objNo = osdRelPos; objNo <= lastObjectNo; objNo += sp.getWidth()) {
                    if (oset.contains(objNo) == false)
                        return false;
                }
            }
            //FIXME: mark replica as complete
            try {
                parent.setxattr("xtreemfs.mark_replica_complete", replica.getOsdUuids(0));
            } catch (Exception ex) {
                //only an optimization, ignore errors
            }
            return true;
        } catch (InterruptedException ex) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("operation aborted", ex);
        } catch (PBRPCException ex) {
            if (ex.getErrorType() == ErrorType.REDIRECT) {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"redirected to: %s",ex.getRedirectToServerUUID());
                // follow the redirect and retry recursively
                forceReplica(ex.getRedirectToServerUUID());
                return isCompleteReplica(replicaNo);
            } else if (ex.getErrorType() == ErrorType.ERRNO) {
                throw ex;
            } else {
                if (Logging.isDebug())
                    Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
                throw new IOException("communication failure", ex);
            }
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
public int getReplicaNumber(String headOSDuuid) {
for (int i = 0; i < this.credentials.getXlocs().getReplicasCount(); i++) {
org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica replica = this.credentials.getXlocs().getReplicas(i);
if (replica.getOsdUuidsList().contains(headOSDuuid))
return i;
}
throw new IllegalArgumentException("osd '"+headOSDuuid+"' is not in any replica of this file");
}
/**
* Triggers the initial replication of the current replica by reading the
* first object on each OSD.
*/
public void triggerInitialReplication() throws IOException {
// send requests to all OSDs of this replica
try {
setXCap();
List<ServiceUUID> osdList = currentReplica.getOSDs();
List<ServiceUUID> osdListCopy = new ArrayList<ServiceUUID>(currentReplica.getOSDs());
// take lowest objects of file
for (int objectNo = 0; osdListCopy.size() != 0; objectNo++) {
// get index of OSD for this object
ServiceUUID osd = currentReplica.getOSDForObject(objectNo);
// remove OSD
osdListCopy.remove(osd);
// send request (read only 1 byte)
RPCResponse<ObjectData> r = osdClient.read(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
credentials, fileId, objectNo, 0, 0, 1);
r.get();
r.freeBuffers();
}
} catch (UnknownUUIDException e) {
// ignore; should not happen
} catch (PBRPCException ex) {
if (Logging.isDebug())
Logging.logMessage(Logging.LEVEL_DEBUG, this, "comm error: %s", ex.toString());
throw new IOException("communication failure", ex);
} catch (IOException e) {
throw new IOException("At least one OSD could not be contacted to replicate the file.", e);
} catch (InterruptedException e) {
// ignore
}
}
    /**
     * Truncates the file to newLength: fetches a truncate capability from the
     * MRC, truncates on the head OSD of the current replica, then stores and
     * pushes the resulting file size update. Moves the file pointer back if
     * it is beyond the new length.
     * @param newLength new file size in bytes
     * @throws IOException on communication failure or abort
     */
    public void setLength(long newLength) throws IOException {
        XCap truncCap = parentVolume.truncateFile(fileId, userCreds);
        // credentials carrying the truncate capability for the OSD call
        FileCredentials tCred = FileCredentials.newBuilder().setXcap(truncCap).setXlocs(this.credentials.getXlocs()).build();
        RPCResponse<OSDWriteResponse> r = null;
        try {
            setXCap();
            ServiceUUID osd = new ServiceUUID(currentReplica.getHeadOsd().toString(), parentVolume.uuidResolver);
            r = osdClient.truncate(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService, tCred, fileId, newLength);
            OSDWriteResponse resp = r.get();
            parentVolume.storeFileSizeUpdate(fileId, resp, userCreds);
            parentVolume.pushFileSizeUpdate(fileId, userCreds);
            if (position > newLength)
                position = newLength;
        } catch (InterruptedException ex) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("operation aborted", ex);
        } catch (PBRPCException ex) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, this,"comm error: %s",ex.toString());
            throw new IOException("communication failure", ex);
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
    /** Returns the file credentials (capability and X-Locations) of this handle. */
    protected FileCredentials getCredentials() {
        return credentials;
    }
    /** Flushes pending metadata updates; equivalent to {@link #fsync()}. */
    public void flush() throws IOException {
        fsync();
    }
}

View File

@@ -0,0 +1,182 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.xloc.ReplicationFlags;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.REPL_FLAG;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicyType;
/**
*
* @author bjko
*/
public class Replica {

    private final File parentFile;
    /** "host:port" strings of the replica's OSDs, in stripe order */
    private final String[] osdAddresses;
    /** UUIDs of the replica's OSDs, in stripe order */
    private final String[] osdUUIDs;
    private final StripingPolicyType stripingPolicy;
    /** stripe size as delivered in the JSON "size" field */
    private final int stripeSize;
    /** number of OSDs the file is striped over */
    private final int stripingWidth;
    /** bit mask of REPL_FLAG values */
    private final int replicationFlags;
    private final UserCredentials userCreds;

    /**
     * Parses a replica description from its JSON representation.
     * Expected keys: "striping-policy" (with "pattern", "size", "width"),
     * "replication-flags" and "osds" (a list of {"address","uuid"} maps).
     * @throws JSONException if the JSON structure is malformed or the
     *         striping policy is unknown
     */
    @SuppressWarnings("unchecked")
    Replica(File parentFile, Map<String,Object> json, UserCredentials userCreds) throws JSONException {
        this.parentFile = parentFile;
        this.userCreds = userCreds;
        try {
            Map<String,Object> sp = (Map<String, Object>) json.get("striping-policy");
            String spName = (String) sp.get("pattern");
            if (spName.equals("STRIPING_POLICY_RAID0")) {
                stripingPolicy = StripingPolicyType.STRIPING_POLICY_RAID0;
            } else {
                throw new JSONException("Unknown striping policy type: "+spName);
            }
            Long tmp = (Long)sp.get("size");
            stripeSize = tmp.intValue();
            tmp = (Long) sp.get("width");
            stripingWidth = tmp.intValue();
            tmp = (Long) json.get("replication-flags");
            replicationFlags = tmp.intValue();
            List<Map<String,Object>> osds = (List<Map<String, Object>>) json.get("osds");
            osdAddresses = new String[stripingWidth];
            osdUUIDs = new String[stripingWidth];
            if (osds.size() != stripingWidth) {
                // message fixed: the check is for inequality, not "less than"
                throw new JSONException("replica information incorrect, OSD count != stripingWidth: "+stripingWidth);
            }
            for (int i = 0; i < stripingWidth; i++) {
                Map<String,Object> osd = osds.get(i);
                osdAddresses[i] = (String) osd.get("address");
                osdUUIDs[i] = (String) osd.get("uuid");
            }
        } catch (Exception ex) {
            // keep the stack trace for diagnosis; JSONException carries no cause
            ex.printStackTrace();
            throw new JSONException("malformed JSON replica representation: "+ex);
        }
    }

    /**
     * Get the location (inet address) of the replica's head OSD
     * (stripe index 0).
     */
    public InetSocketAddress getLocation() {
        return getOSDAddress(0);
    }

    /**
     * Returns the socket address of the OSD at the given stripe index.
     * @param osdNum stripe index, 0 &lt;= osdNum &lt; getStripeWidth()
     */
    public InetSocketAddress getOSDAddress(int osdNum) {
        int colon = osdAddresses[osdNum].indexOf(":");
        String hostname = osdAddresses[osdNum].substring(0, colon);
        String portStr = osdAddresses[osdNum].substring(colon + 1);
        int port = Integer.parseInt(portStr);
        return new InetSocketAddress(hostname, port);
    }

    /** Returns the UUID of the OSD at the given stripe index. */
    public String getOSDUuid(int osdNum) {
        return osdUUIDs[osdNum];
    }

    public int getStripeWidth() {
        return stripingWidth;
    }

    public int getStripeSize() {
        return stripeSize;
    }

    public StripingPolicyType getStripingPolicy() {
        return stripingPolicy;
    }

    /** @return true if this replica is configured to fetch all objects (full replica) */
    public boolean isFullReplica() {
        return (replicationFlags & REPL_FLAG.REPL_FLAG_FULL_REPLICA.getNumber()) != 0;
    }

    public boolean isRandomStrategy() {
        return ReplicationFlags.isRandomStrategy(replicationFlags);
    }

    public boolean isSequentialStrategy() {
        return ReplicationFlags.isSequentialStrategy(replicationFlags);
    }

    public boolean isSequentialPrefetchingStrategy() {
        return ReplicationFlags.isSequentialPrefetchingStrategy(replicationFlags);
    }

    public boolean isRarestFirstStrategy() {
        return ReplicationFlags.isRarestFirstStrategy(replicationFlags);
    }

    /**
     * Checks if the replica is complete (holds all objects of a file).
     *
     * @return true if the replica is flagged as complete, or if a scan of
     *         all its OSDs confirms that every object is present
     * @throws IOException on communication failure during the scan
     */
    public boolean isCompleteReplica() throws IOException {
        if ((replicationFlags & REPL_FLAG.REPL_FLAG_IS_COMPLETE.getNumber()) != 0) {
            return true;
        } else {
            // open read-only and check the object sets on the OSDs
            RandomAccessFile raf = parentFile.open("r", 0);
            try {
                int myNum = raf.getReplicaNumber(osdUUIDs[0]);
                return raf.isCompleteReplica(myNum);
            } finally {
                // was leaked before when the scan threw
                raf.close();
            }
        }
    }

    /**
     * Removes this replica from the file.
     * @param checkForCompleteReplica if true, refuse removal unless at least
     *        one other complete replica exists
     * @throws IOException if this is the last (or last complete) replica, or
     *         on communication failure
     */
    public void removeReplica(boolean checkForCompleteReplica) throws IOException {
        final Replica[] replicas = parentFile.getReplicas(userCreds);
        if (replicas.length == 1)
            throw new IOException("cannot remove last replica (delete file instead!)");
        if (checkForCompleteReplica) {
            // make sure at least one complete replica remains after removal
            boolean completeReplica = false;
            for (Replica r : replicas) {
                if (r.getOSDUuid(0).equals(this.osdUUIDs[0])) {
                    //ignore myself
                    continue;
                }
                if (r.isCompleteReplica()) {
                    completeReplica = true;
                    break;
                }
            }
            if (!completeReplica) {
                throw new IOException("cannot remove last complete replica!");
            }
        }
        parentFile.removeReplica(osdUUIDs[0], userCreds);
    }
}

View File

@@ -0,0 +1,885 @@
/*
* Copyright (c) 2009-2011 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.xtreemfs.common.ReplicaUpdatePolicies;
import org.xtreemfs.common.clients.internal.OpenFileList;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.common.uuids.UUIDResolver;
import org.xtreemfs.foundation.json.JSONException;
import org.xtreemfs.foundation.json.JSONParser;
import org.xtreemfs.foundation.json.JSONString;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.client.PBRPCException;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.POSIXErrno;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.FileCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.VivaldiCoordinates;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntries;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntry;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Setattrs;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.StatVFS;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.XAttr;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.getattrResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.getxattrResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.listxattrResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.openResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.unlinkResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.xtreemfs_get_suitable_osdsRequest;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.xtreemfs_get_suitable_osdsResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.xtreemfs_replica_addRequest;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.xtreemfs_replica_removeRequest;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.xtreemfs_update_file_sizeRequest;
import org.xtreemfs.pbrpc.generatedinterfaces.MRCServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.OSDServiceClient;
import com.google.protobuf.ByteString;
/**
*
* @author bjko
*/
public class Volume {
    /** client for metadata (MRC) operations */
    private final MRCServiceClient mrcClient;
    /** resolves XtreemFS service UUIDs to network addresses */
    final UUIDResolver uuidResolver;
    /** volume name, always stored with a trailing '/' */
    private final String volumeName;
    /** default credentials used when the caller does not pass its own */
    private final UserCredentials userCreds;
    protected final OSDServiceClient osdClient;
    /** tracks open files; caches capabilities and pending file size updates */
    private final OpenFileList ofl;
    /** maximum number of retries for failing operations */
    private final int maxRetries;
    /** zeroed coordinates sent when no real Vivaldi position is available */
    private static final VivaldiCoordinates emptyCoordinates;
    static {
        emptyCoordinates = VivaldiCoordinates.newBuilder().setLocalError(0).setXCoordinate(0).setYCoordinate(0).build();
    }
    /*
     * private final LRUCache<String,CachedXAttr> xattrCache;
     *
     * private final int mdCacheTimeout_ms;
     */
    Volume(OSDServiceClient osdClient, MRCServiceClient client, String volumeName, UUIDResolver uuidResolver,
        UserCredentials userCreds) {
        // defaults: no metadata cache timeout, 5 retries
        this(osdClient, client, volumeName, uuidResolver, userCreds, 0, 5);
    }
    /**
     * @param mdCacheTimeout_ms currently unused — the metadata cache is
     *        disabled (see the commented-out fields above)
     */
    Volume(OSDServiceClient osdClient, MRCServiceClient client, String volumeName, UUIDResolver uuidResolver,
        UserCredentials userCreds, int mdCacheTimeout_ms, int maxRetries) {
        this.mrcClient = client;
        // normalize the volume name to always end with '/'
        this.volumeName = volumeName.endsWith("/") ? volumeName : volumeName + "/";
        this.uuidResolver = uuidResolver;
        this.userCreds = userCreds;
        this.osdClient = osdClient;
        this.maxRetries = maxRetries;
        this.ofl = new OpenFileList(client);
        /*
         * this.xattrCache = new LRUCache<String, CachedXAttr>(2048);
         * this.mdCacheTimeout_ms = mdCacheTimeout_ms;
         */
        // NOTE(review): presumably starts a background worker for capability
        // renewal / file size flushing — confirm against OpenFileList
        ofl.start();
    }
    /**
     * Lists the names of all entries in a directory using the volume's
     * default credentials. Same semantics as java.io.File.list().
     *
     * @param path directory path relative to the volume root
     * @return entry names, or null if the path does not exist
     * @throws IOException on communication failure
     */
    public String[] list(String path) throws IOException {
        return list(path, userCreds);
    }
    /**
     * Lists the names of all entries in a directory.
     * @param path directory path relative to the volume root
     * @param userCreds credentials used for the MRC call
     * @return entry names, or null if the path does not exist
     * @throws IOException on communication failure
     */
    public String[] list(String path, UserCredentials userCreds) throws IOException {
        RPCResponse<DirectoryEntries> response = null;
        final String fixedVol = fixPath(volumeName);
        final String fixedPath = fixPath(path);
        try {
            // boolean flag presumably requests names only (cf. listEntries,
            // which passes false and reads per-entry Stat) — confirm
            response = mrcClient.readdir(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0, 0, true,
                0);
            DirectoryEntries entries = response.get();
            String[] list = new String[entries.getEntriesCount()];
            for (int i = 0; i < list.length; i++) {
                list[i] = entries.getEntries(i).getName();
            }
            return list;
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
                return null;
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Lists directory entries including their Stat records. For files with a
     * pending local (not yet flushed) file size update, the returned Stat is
     * patched with the newer local size.
     * @param path directory path relative to the volume root
     * @param userCreds credentials used for the MRC call
     * @return entries, or null if the path does not exist
     * @throws IOException on communication failure
     */
    public DirectoryEntry[] listEntries(String path, UserCredentials userCreds) throws IOException {
        RPCResponse<DirectoryEntries> response = null;
        path = path.replace("//", "/");
        final String fixedVol = fixPath(volumeName);
        final String fixedPath = fixPath(path);
        try {
            response = mrcClient.readdir(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0, 0, false,
                0);
            DirectoryEntries entries = response.get();
            DirectoryEntry[] list = new DirectoryEntry[entries.getEntriesCount()];
            for (int i = 0; i < list.length; i++) {
                list[i] = entries.getEntries(i);
                Stat s = list[i].getStbuf();
                OSDWriteResponse r = ofl.getLocalFS(volumeName + s.getIno());
                if (r != null && r.hasTruncateEpoch()) {
                    // update with local file size, if cached: newer truncate epoch,
                    // or same epoch with a larger size (&& binds tighter than ||)
                    if ((r.getTruncateEpoch() > s.getTruncateEpoch()) || (r.getTruncateEpoch() == s.getTruncateEpoch())
                        && (r.getSizeInBytes() > s.getSize())) {
                        s = s.toBuilder().setSize(r.getSizeInBytes()).setTruncateEpoch(r.getTruncateEpoch()).build();
                        list[i] = list[i].toBuilder().setStbuf(s).build();
                    }
                }
            }
            return list;
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
                return null;
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /** Same as {@link #listEntries(String, UserCredentials)} with the volume's default credentials. */
    public DirectoryEntry[] listEntries(String path) throws IOException {
        return listEntries(path, userCreds);
    }
    /** Creates a File handle for the given path, bound to the given credentials. */
    public File getFile(String path, UserCredentials userCreds) {
        return new File(this, userCreds, path);
    }
    /** Creates a File handle for the given path with the volume's default credentials. */
    public File getFile(String path) {
        return new File(this, userCreds, path);
    }
    /** Returns the volume name (with trailing '/'). */
    public String getName() {
        return volumeName;
    }
String fixPath(String path) {
path = path.replace("//", "/");
if (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
if (path.startsWith("/"))
path = path.substring(1);
return path;
}
    /**
     * Returns the free space on this volume in bytes
     * (statvfs bavail * bsize).
     * @param userCreds credentials used for the MRC call
     * @throws IOException on communication failure
     */
    public long getFreeSpace(UserCredentials userCreds) throws IOException {
        RPCResponse<StatVFS> response = null;
        try {
            response = mrcClient.statvfs(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), 0);
            StatVFS fsinfo = response.get();
            return fsinfo.getBavail() * fsinfo.getBsize();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /** Same as {@link #getFreeSpace(UserCredentials)} with the volume's default credentials. */
    public long getFreeSpace() throws IOException {
        return getFreeSpace(userCreds);
    }
public StatVFS statfs(UserCredentials userCreds) throws IOException {
RPCResponse<StatVFS> response = null;
try {
response = mrcClient.statvfs(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), 0);
StatVFS fsinfo = response.get();
return fsinfo;
} catch (PBRPCException ex) {
throw wrapException(ex);
} catch (InterruptedException ex) {
throw wrapException(ex);
} finally {
if (response != null)
response.freeBuffers();
}
}
public StatVFS statfs() throws IOException {
return statfs(userCreds);
}
    /**
     * Checks whether this volume replicates files on close, based on the
     * "xtreemfs.repl_factor" xattr of the volume root.
     * NOTE(review): returns true only when the factor equals "1", which looks
     * inverted (a factor of 1 normally means "no replication") — confirm
     * against the MRC's xattr semantics.
     * @throws IOException on communication failure
     */
    public boolean isReplicateOnClose(UserCredentials userCreds) throws IOException {
        String numRepl = getxattr(fixPath(volumeName), "xtreemfs.repl_factor", userCreds);
        if (numRepl == null)
            return false;
        return numRepl.equals("1");
    }
    /** Same as {@link #isReplicateOnClose(UserCredentials)} with the volume's default credentials. */
    public boolean isReplicateOnClose() throws IOException {
        return isReplicateOnClose(userCreds);
    }
public boolean isSnapshot() {
return volumeName.indexOf('@') != -1;
}
    /**
     * Returns the volume's default replication factor, read from the
     * "xtreemfs.repl_factor" xattr of the volume root.
     * @throws IOException if the attribute is missing or not a number, or on
     *         communication failure
     */
    public int getDefaultReplicationFactor(UserCredentials userCreds) throws IOException {
        String numRepl = getxattr(fixPath(volumeName), "xtreemfs.repl_factor", userCreds);
        try {
            // also covers numRepl == null (throws NumberFormatException)
            return Integer.valueOf(numRepl);
        } catch (Exception ex) {
            throw new IOException("cannot fetch replication factor", ex);
        }
    }
    /** Same as {@link #getDefaultReplicationFactor(UserCredentials)} with the volume's default credentials. */
    public int getDefaultReplicationFactor() throws IOException {
        return getDefaultReplicationFactor(userCreds);
    }
    /**
     * Returns the used space on this volume in bytes
     * (statvfs (blocks - bavail) * bsize).
     * @param userCreds credentials used for the MRC call
     * @throws IOException on communication failure
     */
    public long getUsedSpace(UserCredentials userCreds) throws IOException {
        RPCResponse<StatVFS> response = null;
        try {
            response = mrcClient.statvfs(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), 0);
            StatVFS fsinfo = response.get();
            return (fsinfo.getBlocks() - fsinfo.getBavail()) * fsinfo.getBsize();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /** Same as {@link #getUsedSpace(UserCredentials)} with the volume's default credentials. */
    public long getUsedSpace() throws IOException {
        return getUsedSpace(userCreds);
    }
    /**
     * Returns the volume's default object size in bytes, taken from the
     * bsize field of the MRC's statvfs response.
     * @param userCreds credentials used for the MRC call
     * @throws IOException on communication failure
     */
    public long getDefaultObjectSize(UserCredentials userCreds) throws IOException {
        RPCResponse<StatVFS> response = null;
        try {
            response = mrcClient.statvfs(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), 0);
            StatVFS fsinfo = response.get();
            return fsinfo.getBsize();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /** Same as {@link #getDefaultObjectSize(UserCredentials)} with the volume's default credentials. */
    public long getDefaultObjectSize() throws IOException {
        return getDefaultObjectSize(userCreds);
    }
    /**
     * Enables or disables snapshots on this volume by setting the
     * "xtreemfs.snapshots_enabled" xattr on the volume root.
     * NOTE(review): value.getBytes() uses the platform default charset —
     * confirm the MRC expects that encoding.
     * @throws IOException on communication failure
     */
    public void enableSnapshots(boolean enable, UserCredentials userCreds) throws IOException {
        RPCResponse r = null;
        try {
            String value = enable + "";
            r = mrcClient.setxattr(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), "",
                "xtreemfs.snapshots_enabled", value, ByteString.copyFrom(value.getBytes()), 0);
            r.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
    /** Same as {@link #enableSnapshots(boolean, UserCredentials)} with the volume's default credentials. */
    public void enableSnapshots(boolean enable) throws IOException {
        enableSnapshots(enable, userCreds);
    }
    /**
     * Creates a snapshot of this volume by writing a command string to the
     * "xtreemfs.snapshots" xattr: "c[r] &lt;name&gt;", where 'r' requests a
     * recursive snapshot.
     * @param name name of the snapshot to create
     * @param recursive whether to snapshot recursively
     * @throws IOException on communication failure
     */
    public void snapshot(String name, boolean recursive, UserCredentials userCreds) throws IOException {
        RPCResponse r = null;
        try {
            String cmd = "c" + (recursive ? "r" : "") + " " + name;
            r = mrcClient.setxattr(null, RPCAuthentication.authNone, userCreds, volumeName.replace("/", ""), "",
                "xtreemfs.snapshots", cmd, ByteString.copyFrom(cmd.getBytes()), 0);
            r.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }
    /** Same as {@link #snapshot(String, boolean, UserCredentials)} with the volume's default credentials. */
    public void snapshot(String name, boolean recursive) throws IOException {
        snapshot(name, recursive, userCreds);
    }
    /**
     * Stats a path at the MRC. If a newer local (not yet flushed) file size
     * is cached for the file, the returned Stat is patched with it.
     * @return the (possibly locally patched) Stat record
     * @throws IOException on communication failure
     */
    Stat stat(String path, UserCredentials userCreds) throws IOException {
        RPCResponse<getattrResponse> response = null;
        try {
            response = mrcClient.getattr(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName),
                fixPath(path), 0);
            Stat s = response.get().getStbuf();
            OSDWriteResponse r = ofl.getLocalFS(volumeName + s.getIno());
            if (r != null && r.hasTruncateEpoch()) {
                // update with local file size, if cached: newer truncate epoch,
                // or same epoch with a larger size (&& binds tighter than ||)
                if ((r.getTruncateEpoch() > s.getTruncateEpoch()) || (r.getTruncateEpoch() == s.getTruncateEpoch())
                    && (r.getSizeInBytes() > s.getSize())) {
                    s = s.toBuilder().setSize(r.getSizeInBytes()).setTruncateEpoch(r.getTruncateEpoch()).build();
                }
            }
            return s;
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Reads an extended attribute of a path.
     * @return the attribute value, or null if the attribute does not exist
     *         (ENODATA)
     * @throws IOException on communication failure
     */
    String getxattr(String path, String name, UserCredentials userCreds) throws IOException {
        RPCResponse<getxattrResponse> response = null;
        try {
            response = mrcClient.getxattr(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName),
                fixPath(path), name);
            return response.get().getValue();
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENODATA)
                return null;
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Lists the names of all extended attributes of a path.
     * @return attribute names, or null on ENODATA
     * @throws IOException on communication failure
     */
    String[] listxattr(String path, UserCredentials userCreds) throws IOException {
        RPCResponse<listxattrResponse> response = null;
        try {
            response = mrcClient.listxattr(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName),
                fixPath(path), true);
            listxattrResponse result = response.get();
            List<XAttr> attrs = result.getXattrsList();
            String[] names = new String[attrs.size()];
            for (int i = 0; i < names.length; i++)
                names[i] = attrs.get(i).getName();
            return names;
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENODATA)
                return null;
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Sets an extended attribute on a path.
     * NOTE(review): value.getBytes() uses the platform default charset —
     * confirm the MRC expects that encoding.
     * @throws IOException on communication failure
     */
    void setxattr(String path, String name, String value, UserCredentials userCreds) throws IOException {
        RPCResponse response = null;
        try {
            response = mrcClient.setxattr(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName),
                fixPath(path), name, value, ByteString.copyFrom(value.getBytes()), 0);
            response.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Creates a directory at the given path.
     * @param permissions POSIX permission bits for the new directory
     * @throws IOException on communication failure
     */
    void mkdir(String path, int permissions, UserCredentials userCreds) throws IOException {
        RPCResponse response = null;
        try {
            response = mrcClient.mkdir(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName), fixPath(path),
                permissions);
            response.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Ensures a file exists at the given path by opening it with O_CREAT
     * and mode 0700.
     * @throws IOException on communication failure
     */
    void touch(String path, UserCredentials userCreds) throws IOException {
        RPCResponse response = null;
        try {
            response = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName), fixPath(path),
                GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_CREAT.getNumber(), 0700, 0, emptyCoordinates);
            response.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Renames/moves src to dest within this volume.
     * @throws IOException on communication failure
     */
    void rename(String src, String dest, UserCredentials userCreds) throws IOException {
        RPCResponse response = null;
        try {
            response = mrcClient.rename(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName), fixPath(src),
                fixPath(dest));
            response.get();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Unlinks a path at the MRC. If the MRC returns file credentials (i.e.
     * the last link was removed), also deletes the file's objects by sending
     * an unlink to the head OSD of every replica.
     * @throws IOException on communication failure
     */
    void unlink(String path, UserCredentials userCreds) throws IOException {
        RPCResponse<unlinkResponse> response = null;
        RPCResponse ulnkResp = null;
        try {
            response = mrcClient
                .unlink(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName), fixPath(path));
            unlinkResponse resp = response.get();
            final FileCredentials fcs = resp.hasCreds() ? resp.getCreds() : null;
            if (fcs != null) {
                // delete on OSDs
                for (GlobalTypes.Replica r : fcs.getXlocs().getReplicasList()) {
                    final String headOSDuuid = r.getOsdUuids(0);
                    final ServiceUUID osdAddr = new ServiceUUID(headOSDuuid, uuidResolver);
                    osdAddr.resolve();
                    ulnkResp = osdClient.unlink(osdAddr.getAddress(), RPCAuthentication.authNone,
                        RPCAuthentication.userService, fcs, fcs.getXcap().getFileId());
                    ulnkResp.get();
                    // freed per iteration; the finally block only covers the last pending one
                    ulnkResp.freeBuffers();
                    ulnkResp = null;
                }
            }
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
            if (ulnkResp != null)
                ulnkResp.freeBuffers();
        }
    }
    /**
     * Caches a file size update locally; it is flushed to the MRC later by
     * {@link #pushFileSizeUpdate}.
     * NOTE(review): the userCreds parameter is unused — presumably kept for
     * interface symmetry; confirm.
     */
    void storeFileSizeUpdate(String fileId, OSDWriteResponse resp, UserCredentials userCreds) {
        ofl.fsUpdate(fileId, resp);
    }
    /**
     * Sends the locally cached file size update for fileId (if any) to the
     * MRC. No-op if no update is pending or the pending update carries no size.
     * @throws IOException on communication failure
     */
    void pushFileSizeUpdate(String fileId, UserCredentials userCreds) throws IOException {
        OSDWriteResponse owr = ofl.sendFsUpdate(fileId);
        if (owr != null) {
            XCap cap = ofl.getCapability(fileId);
            RPCResponse response = null;
            try {
                // nothing to report without a size
                if (!owr.hasSizeInBytes())
                    return;
                long newSize = owr.getSizeInBytes();
                int newEpoch = owr.getTruncateEpoch();
                OSDWriteResponse.Builder osdResp = OSDWriteResponse.newBuilder().setSizeInBytes(newSize)
                    .setTruncateEpoch(newEpoch);
                xtreemfs_update_file_sizeRequest fsBuf = xtreemfs_update_file_sizeRequest.newBuilder().setXcap(cap)
                    .setOsdWriteResponse(osdResp).build();
                response = mrcClient.xtreemfs_update_file_size(null, RPCAuthentication.authNone, userCreds, fsBuf);
                response.get();
            } catch (PBRPCException ex) {
                throw wrapException(ex);
            } catch (InterruptedException ex) {
                throw wrapException(ex);
            } finally {
                if (response != null)
                    response.freeBuffers();
            }
        }
    }
void closeFile(RandomAccessFile file, String fileId, boolean readOnly, UserCredentials userCreds)
throws IOException {
pushFileSizeUpdate(fileId, userCreds);
try {
XCap cap = ofl.getCapability(fileId);
// notify MRC that file has been closed
RPCResponse response = null;
try {
response = mrcClient.xtreemfs_update_file_size(null, RPCAuthentication.authNone, userCreds, cap,
OSDWriteResponse.newBuilder().build(), true, emptyCoordinates);
response.get();
} catch (Exception ex) {
Logging.logError(Logging.LEVEL_ERROR, this, ex);
throw new IOException("file could not be closed due to exception");
} finally {
if (response != null)
response.freeBuffers();
}
} finally {
ofl.closeFile(fileId, file);
}
}
    /**
     * Opens a file at the MRC and registers the resulting handle in the open
     * file list.
     * @param flags SYSTEM_V_FCNTL open flags (O_CREAT, O_SYNC, ...)
     * @param mode permission bits for a newly created file
     * @return a RandomAccessFile handle for the opened file
     * @throws FileNotFoundException if the path does not exist
     * @throws IOException on communication failure
     */
    RandomAccessFile openFile(File parent, int flags, int mode, UserCredentials userCreds) throws IOException {
        RPCResponse<openResponse> response = null;
        final String fullPath = fixPath(volumeName + parent.getPath());
        final String fixedVol = fixPath(volumeName);
        final String fixedPath = fixPath(parent.getPath());
        try {
            response = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, flags, mode, 0,
                emptyCoordinates);
            FileCredentials cred = response.get().getCreds();
            // O_SYNC requests synchronous metadata (file size) updates
            boolean syncMd = (flags & GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_SYNC.getNumber()) > 0;
            // a read-only replica update policy implies a read-only handle
            boolean rdOnly = cred.getXlocs().getReplicaUpdatePolicy()
                .equals(ReplicaUpdatePolicies.REPL_UPDATE_PC_RONLY);
            RandomAccessFile file = new RandomAccessFile(parent, this, osdClient, cred, rdOnly, syncMd, userCreds);
            ofl.openFile(cred.getXcap(), file);
            return file;
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
                throw new FileNotFoundException("file '" + fullPath + "' does not exist");
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Requests a truncate capability for fileId from the MRC (required before
     * truncating on the OSDs or recording a forced file size).
     * @throws FileNotFoundException if the file does not exist
     * @throws IOException on communication failure
     */
    XCap truncateFile(String fileId, UserCredentials userCreds) throws IOException {
        RPCResponse<XCap> response = null;
        try {
            response = mrcClient.ftruncate(null, RPCAuthentication.authNone, userCreds, ofl.getCapability(fileId));
            return response.get();
        } catch (PBRPCException ex) {
            if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
                throw new FileNotFoundException("file '" + fileId + "' does not exist");
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
    /**
     * Asks the MRC for up to numOSDs OSDs suitable for adding a replica of
     * the given file (resolves the file id via the "xtreemfs.file_id" xattr).
     * @return list of OSD UUIDs
     * @throws IOException on communication failure
     */
    List<String> getSuitableOSDs(File file, int numOSDs, UserCredentials userCreds) throws IOException {
        String fileId = getxattr(file.getPath(), "xtreemfs.file_id", userCreds);
        RPCResponse<xtreemfs_get_suitable_osdsResponse> response = null;
        try {
            xtreemfs_get_suitable_osdsRequest request = xtreemfs_get_suitable_osdsRequest.newBuilder()
                .setFileId(fileId).setNumOsds(numOSDs).build();
            response = mrcClient.xtreemfs_get_suitable_osds(null, RPCAuthentication.authNone, userCreds, request);
            return response.get().getOsdUuidsList();
        } catch (PBRPCException ex) {
            throw wrapException(ex);
        } catch (InterruptedException ex) {
            throw wrapException(ex);
        } finally {
            if (response != null)
                response.freeBuffers();
        }
    }
/**
 * Changes the access mode (permission bits) of the given path.
 *
 * @param path      volume-relative path of the file or directory
 * @param mode      new POSIX mode bits
 * @param userCreds credentials used for the MRC call
 * @throws IOException on RPC failure or interruption
 */
void chmod(String path, int mode, UserCredentials userCreds) throws IOException {
    Stat stbuf = emptyStat().setMode(mode).build();
    applySetattr(path, stbuf, Setattrs.SETATTR_MODE.getNumber(), userCreds);
}

/**
 * Changes the owning user of the given path.
 *
 * @param path      volume-relative path of the file or directory
 * @param user      new owning user id
 * @param userCreds credentials used for the MRC call
 * @throws IOException on RPC failure or interruption
 */
void chown(String path, String user, UserCredentials userCreds) throws IOException {
    Stat stbuf = emptyStat().setUserId(user).build();
    applySetattr(path, stbuf, Setattrs.SETATTR_UID.getNumber(), userCreds);
}

/**
 * Changes the owning group of the given path.
 *
 * @param path      volume-relative path of the file or directory
 * @param group     new owning group id
 * @param userCreds credentials used for the MRC call
 * @throws IOException on RPC failure or interruption
 */
void chgrp(String path, String group, UserCredentials userCreds) throws IOException {
    Stat stbuf = emptyStat().setGroupId(group).build();
    applySetattr(path, stbuf, Setattrs.SETATTR_GID.getNumber(), userCreds);
}

/**
 * Returns a Stat builder with every field zeroed/empty; callers set only the
 * field selected by the corresponding SETATTR_* flag. The remaining fields
 * are presumably ignored by the MRC for unset flags (matches the zeroed
 * builders the original chmod/chown/chgrp used) -- TODO confirm against MRC.
 */
private static Stat.Builder emptyStat() {
    return Stat.newBuilder().setAtimeNs(0).setAttributes(0).setBlksize(0).setCtimeNs(0).setDev(0).setEtag(0)
            .setGroupId("").setIno(0).setMode(0).setMtimeNs(0).setNlink(0).setSize(0).setTruncateEpoch(0)
            .setUserId("");
}

/**
 * Shared setattr RPC wrapper used by chmod/chown/chgrp: issues the MRC
 * setattr call for the given attribute mask and always frees the pooled
 * response buffers.
 */
private void applySetattr(String path, Stat stbuf, int toSet, UserCredentials userCreds) throws IOException {
    RPCResponse response = null;
    try {
        response = mrcClient.setattr(null, RPCAuthentication.authNone, userCreds, fixPath(volumeName),
                fixPath(path), stbuf, toSet);
        response.get();
    } catch (PBRPCException ex) {
        throw wrapException(ex);
    } catch (InterruptedException ex) {
        throw wrapException(ex);
    } finally {
        if (response != null)
            response.freeBuffers();
    }
}
/**
 * Replaces the file's ACL: first strips every existing named entry (the
 * base entries "u:", "g:", "o:" and "m:" are kept), then installs all
 * entries from the given map via the "xtreemfs.acl" extended attribute.
 *
 * @param path       volume-relative path of the file or directory
 * @param aclEntries entity -> rights mapping to install
 * @param userCreds  credentials used for the xattr calls
 * @throws IOException on RPC failure
 */
void setACL(String path, Map<String, Object> aclEntries, UserCredentials userCreds) throws IOException {
    // drop all named entries of the current ACL; base entries stay untouched
    for (String entity : getACL(path, userCreds).keySet()) {
        boolean isBaseEntry = entity.equals("u:") || entity.equals("g:") || entity.equals("o:")
                || entity.equals("m:");
        if (!isBaseEntry) {
            setxattr(path, "xtreemfs.acl", "x " + entity, userCreds);
        }
    }
    // install the requested entries ("m " = modify/add)
    for (Entry<String, Object> entry : aclEntries.entrySet()) {
        setxattr(path, "xtreemfs.acl", "m " + entry.getKey() + ":" + entry.getValue(), userCreds);
    }
}
/**
 * Reads the file's ACL from the "xtreemfs.acl" extended attribute and
 * parses its JSON representation into a map.
 *
 * @param path      volume-relative path of the file or directory
 * @param userCreds credentials used for the xattr call
 * @return entity -> rights mapping as stored in the ACL
 * @throws IOException on RPC failure or if the ACL JSON is malformed
 */
Map<String, Object> getACL(String path, UserCredentials userCreds) throws IOException {
    final String aclAsJSON = getxattr(path, "xtreemfs.acl", userCreds);
    try {
        return (Map<String, Object>) JSONParser.parseJSON(new JSONString(aclAsJSON));
    } catch (JSONException e) {
        // surface malformed ACL JSON as an I/O failure
        throw new IOException(e);
    }
}
/**
 * Maps a PBRPCException onto the matching java.io exception type: ENOENT
 * becomes a FileNotFoundException, everything else a generic IOException
 * carrying the POSIX error code and message (original exception as cause).
 */
static IOException wrapException(PBRPCException ex) {
    final boolean noSuchFile = ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT;
    return noSuchFile
            ? new FileNotFoundException(ex.getErrorMessage())
            : new IOException(ex.getPOSIXErrno() + ": " + ex.getErrorMessage(), ex);
}
/**
 * Wraps an InterruptedException in an IOException so callers only have to
 * handle java.io exception types; the original exception is preserved as
 * the cause.
 */
static IOException wrapException(InterruptedException ex) {
    // fixed typo in the user-visible message: "interruped" -> "interrupted"
    return new IOException("operation was interrupted: " + ex, ex);
}
// Last-resort cleanup run by the GC: stops the capability-renewal thread of
// the open-file list. NOTE(review): Object.finalize() is deprecated in
// modern Java and its execution is not guaranteed -- callers should invoke
// shutdown() explicitly instead of relying on this.
public void finalize() {
    ofl.shutdown();
}
/**
 * Adds a new replica for the given file on the listed OSDs.
 *
 * Sequence of RPCs: open the file at the MRC to obtain current credentials,
 * register the new replica via xtreemfs_replica_add, then — depending on the
 * replica update policy — either trigger initial replication by reading one
 * byte from each new OSD (read-only replicas with the FULL flag) or notify
 * the new OSD via xtreemfs_rwr_notify (read/write replication).
 *
 * @param file      the file to replicate
 * @param width     striping width of the new replica (also the number of OSDs touched)
 * @param osdSet    UUIDs of the OSDs that shall hold the new replica
 * @param flags     replication flags (see GlobalTypes.REPL_FLAG)
 * @param userCreds credentials used for all RPCs
 * @throws FileNotFoundException if the MRC reports ENOENT for the file
 * @throws IOException           on any other RPC failure or interruption
 */
void addReplica(File file, int width, List<String> osdSet, int flags, UserCredentials userCreds) throws IOException {
    RPCResponse<openResponse> response1 = null;
    RPCResponse response3 = null;
    final String fullPath = fixPath(volumeName + file.getPath());
    final String fixedVol = fixPath(volumeName);
    final String fixedPath = fixPath(file.getPath());
    try {
        // replica 0 serves as the template for stripe size and policy type
        org.xtreemfs.common.clients.Replica r = file.getReplica(0);
        StripingPolicy sp = StripingPolicy.newBuilder().setStripeSize(r.getStripeSize()).setWidth(width)
                .setType(r.getStripingPolicy()).build();
        org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica newReplica = org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica
                .newBuilder().addAllOsdUuids(osdSet).setReplicationFlags(flags).setStripingPolicy(sp).build();
        // open to obtain the file's current credentials (xcap + xlocs)
        response1 = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0,
                GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber(), 0, emptyCoordinates);
        FileCredentials oldCreds = response1.get().getCreds();
        // free eagerly so the same variable can be reused for the re-open below
        response1.freeBuffers();
        response1 = null;
        boolean readOnlyRepl = (oldCreds.getXlocs().getReplicaUpdatePolicy()
                .equals(ReplicaUpdatePolicies.REPL_UPDATE_PC_RONLY));
        xtreemfs_replica_addRequest request = xtreemfs_replica_addRequest.newBuilder().setNewReplica(newReplica)
                .setFileId(oldCreds.getXcap().getFileId()).build();
        response3 = mrcClient.xtreemfs_replica_add(null, RPCAuthentication.authNone, userCreds, request);
        response3.get();
        response3.freeBuffers();
        response3 = null;
        if (readOnlyRepl) {
            if ((flags & GlobalTypes.REPL_FLAG.REPL_FLAG_FULL_REPLICA.getNumber()) > 0) {
                // full read-only replica: re-open to pick up the new xlocs, then
                // read 1 byte of one object per OSD to kick off background fetches
                response1 = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0,
                        GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber(), 0, emptyCoordinates);
                FileCredentials newCreds = response1.get().getCreds();
                for (int objNo = 0; objNo < width; objNo++) {
                    ServiceUUID osd = new ServiceUUID(osdSet.get(objNo), uuidResolver);
                    response3 = osdClient
                            .read(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
                                    newCreds, newCreds.getXcap().getFileId(), objNo, 0, 0, 1);
                    response3.get();
                    response3.freeBuffers();
                    response3 = null;
                }
            }
        } else {
            // read/write replication: tell the head OSD of the new replica
            // about the updated xlocs so it can join the replication group
            response1 = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0,
                    GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber(), 0, emptyCoordinates);
            FileCredentials newCreds = response1.get().getCreds();
            ServiceUUID osd = new ServiceUUID(osdSet.get(0), uuidResolver);
            response3 = osdClient.xtreemfs_rwr_notify(osd.getAddress(), RPCAuthentication.authNone,
                    RPCAuthentication.userService, newCreds);
            response3.get();
            response3.freeBuffers();
            response3 = null;
        }
    } catch (PBRPCException ex) {
        if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
            throw new FileNotFoundException("file '" + fullPath + "' does not exist");
        throw wrapException(ex);
    } catch (InterruptedException ex) {
        throw wrapException(ex);
    } finally {
        // free whatever responses are still pending after an exception
        if (response1 != null)
            response1.freeBuffers();
        if (response3 != null)
            response3.freeBuffers();
    }
}
/**
 * Removes the replica headed by the given OSD from the file.
 *
 * Sequence of RPCs: open the file at the MRC for its current credentials,
 * deregister the replica via xtreemfs_replica_remove (which yields a delete
 * capability), then instruct the head OSD to unlink its objects using that
 * capability combined with the old xlocs.
 *
 * @param file        the file whose replica is removed
 * @param headOSDuuid UUID of the head OSD of the replica to remove
 * @param userCreds   credentials used for all RPCs
 * @throws FileNotFoundException if the MRC reports ENOENT for the file
 * @throws IOException           on any other RPC failure or interruption
 */
void removeReplica(File file, String headOSDuuid, UserCredentials userCreds) throws IOException {
    RPCResponse<openResponse> response1 = null;
    RPCResponse<FileCredentials> response2 = null;
    RPCResponse response3 = null;
    final String fullPath = fixPath(volumeName + file.getPath());
    final String fixedVol = fixPath(volumeName);
    final String fixedPath = fixPath(file.getPath());
    try {
        // open to obtain the file's current credentials (xcap + xlocs)
        response1 = mrcClient.open(null, RPCAuthentication.authNone, userCreds, fixedVol, fixedPath, 0,
                GlobalTypes.SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber(), 0, emptyCoordinates);
        FileCredentials oldCreds = response1.get().getCreds();
        xtreemfs_replica_removeRequest request = xtreemfs_replica_removeRequest.newBuilder()
                .setOsdUuid(headOSDuuid).setFileId(oldCreds.getXcap().getFileId()).build();
        // deregistering the replica returns a delete capability for the OSD unlink
        response2 = mrcClient.xtreemfs_replica_remove(null, RPCAuthentication.authNone, userCreds, request);
        FileCredentials delCap = response2.get();
        ServiceUUID osd = new ServiceUUID(headOSDuuid, uuidResolver);
        // NOTE(review): computed but unused below -- presumably left over; confirm
        boolean readOnlyRepl = (oldCreds.getXlocs().getReplicaUpdatePolicy()
                .equals(ReplicaUpdatePolicies.REPL_UPDATE_PC_RONLY));
        // combine the delete xcap with the pre-removal xlocs for the unlink call
        FileCredentials newCreds = FileCredentials.newBuilder().setXcap(delCap.getXcap())
                .setXlocs(oldCreds.getXlocs()).build();
        response3 = osdClient.unlink(osd.getAddress(), RPCAuthentication.authNone, RPCAuthentication.userService,
                newCreds, oldCreds.getXcap().getFileId());
        response3.get();
    } catch (PBRPCException ex) {
        if (ex.getPOSIXErrno() == POSIXErrno.POSIX_ERROR_ENOENT)
            throw new FileNotFoundException("file '" + fullPath + "' does not exist");
        throw wrapException(ex);
    } catch (InterruptedException ex) {
        throw wrapException(ex);
    } finally {
        // always return pooled response buffers
        if (response1 != null)
            response1.freeBuffers();
        if (response2 != null)
            response2.freeBuffers();
        if (response3 != null)
            response3.freeBuffers();
    }
}
// Stops the background capability-renewal thread of the open-file list.
void shutdown() {
    ofl.shutdown();
}
/**
 * Returns the configured maximum number of retry attempts.
 *
 * @return the maxRetries
 */
public int getMaxRetries() {
    return maxRetries;
}
}

View File

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2009 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.internal;
import java.util.List;
import org.xtreemfs.common.xloc.Replica;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicyType;
/**
*
* @author bjko
*/
/**
 * Translates a contiguous byte range of a striped file into the list of
 * per-object requests needed to serve it, one request per OSD object
 * touched by the range.
 *
 * @author bjko
 */
public abstract class ObjectMapper {

    /** striping policy of the file; fixed at construction time */
    private final StripingPolicy fileSP;

    protected ObjectMapper(StripingPolicy fileSP) {
        this.fileSP = fileSP;
    }

    /**
     * Returns the mapper implementation matching the file's striping policy.
     *
     * @param fileSP the file's striping policy
     * @return a mapper for the policy type
     * @throws IllegalArgumentException if the policy type is not supported
     *         (was a plain RuntimeException; IllegalArgumentException is a
     *         RuntimeException subtype, so existing callers are unaffected)
     */
    public static ObjectMapper getMapper(StripingPolicy fileSP) {
        if (fileSP.getType() == StripingPolicyType.STRIPING_POLICY_RAID0)
            return new RAID0ObjectMapper(fileSP);
        else
            throw new IllegalArgumentException("unknown striping policy type: " + fileSP.getType());
    }

    /**
     * Maps a read of {@code length} bytes at {@code fileOffset} onto
     * per-object requests against the given replica.
     */
    public abstract List<ObjectRequest> readRequest(int length, long fileOffset, Replica replica);

    /**
     * Maps a write of the buffer's remaining bytes at {@code fileOffset}
     * onto per-object requests; each request carries the buffer slice for
     * its object.
     */
    public abstract List<ObjectRequest> writeRequest(ReusableBuffer data, long fileOffset, Replica replica);

    /**
     * A single per-object I/O request: object number, byte range within the
     * object, target OSD and (for writes) the data slice.
     */
    public static class ObjectRequest {

        private final long objNo;
        private final int offset;
        private final int length;
        // mutable: writeRequest attaches the data slice after mapping
        private ReusableBuffer data;
        private final String osdUUID;

        public ObjectRequest(long objNo, int offset, int length, String osdUUID, ReusableBuffer data) {
            this.objNo = objNo;
            this.offset = offset;
            this.data = data;
            this.length = length;
            this.osdUUID = osdUUID;
        }

        /**
         * @return the objNo (0-based object number within the file)
         */
        public long getObjNo() {
            return objNo;
        }

        /**
         * @return the offset within the object
         */
        public int getOffset() {
            return offset;
        }

        /**
         * @return the data slice for this request, or null for reads
         */
        public ReusableBuffer getData() {
            return data;
        }

        /**
         * @return the length of the range within the object
         */
        public int getLength() {
            return length;
        }

        /**
         * @return the osdUUID of the OSD responsible for this object
         */
        public String getOsdUUID() {
            return osdUUID;
        }

        /**
         * @param data the data to set (buffer slice to be written)
         */
        public void setData(ReusableBuffer data) {
            this.data = data;
        }
    }
}

View File

@@ -0,0 +1,226 @@
/*
* Copyright (c) 2009 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.internal;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.clients.RandomAccessFile;
import org.xtreemfs.foundation.TimeSync;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.client.RPCAuthentication;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.MRCServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.CONSTANTS;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.SYSTEM_V_FCNTL;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
/**
*
* @author bjko
*/
/**
 * Tracks all currently open files (keyed by file id) together with their
 * capabilities and renews the capabilities in a background thread before
 * they expire. Also caches pending file-size updates (OSDWriteResponses)
 * that still have to be propagated to the MRC.
 *
 * Thread-safety: the two maps are guarded by synchronizing on themselves.
 *
 * @author bjko
 */
public class OpenFileList extends Thread {

    /** file id -> capability entry; guarded by synchronized (capabilities) */
    private final Map<String, CapEntry> capabilities;

    /** file id -> newest pending file-size update; guarded by synchronized (fsUpdateCache) */
    private final Map<String, OSDWriteResponse> fsUpdateCache;

    private final MRCServiceClient client;

    private volatile boolean quit;

    public OpenFileList(MRCServiceClient client) {
        super("XCapRNThr");
        // typed instead of the former raw HashMap (behavior unchanged)
        capabilities = new HashMap<String, CapEntry>();
        fsUpdateCache = new HashMap<String, OSDWriteResponse>();
        this.client = client;
    }

    /**
     * Registers an open file under its capability's file id; if an entry
     * already exists, the stored capability may be upgraded (R to RW).
     */
    public void openFile(XCap capability, RandomAccessFile f) {
        synchronized (capabilities) {
            CapEntry e = capabilities.get(capability.getFileId());
            if (e == null) {
                e = new CapEntry(capability, TimeSync.getLocalSystemTime()+capability.getExpireTimeoutS()*1000);
                capabilities.put(capability.getFileId(),e);
            } else {
                e.upgradeCap(capability, TimeSync.getLocalSystemTime()+capability.getExpireTimeoutS()*1000);
            }
            e.addFile(f);
        }
    }

    /**
     * Deregisters a file handle; when the last handle for a file id is
     * closed, the capability entry and any cached file-size update are
     * dropped.
     *
     * @throws IllegalStateException if no entry exists for the file id
     */
    public void closeFile(String fileId, RandomAccessFile f) {
        boolean lastFile = false;
        synchronized (capabilities) {
            CapEntry e = capabilities.get(fileId);
            if (e != null) {
                lastFile = e.removeFile(f);
                if (lastFile)
                    capabilities.remove(fileId);
            } else {
                // fixed typo in the message ("nut" -> "not")
                throw new IllegalStateException("entry must not be null");
            }
        }
        if (lastFile) {
            synchronized (fsUpdateCache) {
                fsUpdateCache.remove(fileId);
            }
        }
    }

    /**
     * Removes and returns the pending file-size update for the file id
     * (to be sent to the MRC by the caller), or null if there is none.
     */
    public OSDWriteResponse sendFsUpdate(String fileId) {
        synchronized (fsUpdateCache) {
            return fsUpdateCache.remove(fileId);
        }
    }

    /** Returns the cached file-size update without removing it, or null. */
    public OSDWriteResponse getLocalFS(String fileId) {
        synchronized (fsUpdateCache) {
            return fsUpdateCache.get(fileId);
        }
    }

    /**
     * Caches a file-size update, keeping only the newest one: an update
     * wins if its truncate epoch is larger, or equal with a larger size.
     */
    public void fsUpdate(String fileId, OSDWriteResponse resp) {
        if ((resp == null) || !resp.hasSizeInBytes())
            return;
        synchronized (fsUpdateCache) {
            OSDWriteResponse last = fsUpdateCache.get(fileId);
            if (last == null) {
                fsUpdateCache.put(fileId, resp);
                return;
            }
            final OSDWriteResponse newFS = resp;
            final OSDWriteResponse oldFS = last;
            // && binds tighter than ||: newer epoch wins outright; within the
            // same epoch the larger size wins
            if ( (newFS.getTruncateEpoch() > oldFS.getTruncateEpoch()) ||
                 (newFS.getTruncateEpoch() == oldFS.getTruncateEpoch()) &&
                 (newFS.getSizeInBytes() > oldFS.getSizeInBytes()) ) {
                fsUpdateCache.put(fileId, resp);
            }
        }
    }

    /** Returns the current capability for the file id, or null if not open. */
    public XCap getCapability(String fileId) {
        synchronized (capabilities) {
            CapEntry e = capabilities.get(fileId);
            if (e == null)
                return null;
            return e.getCap();
        }
    }

    /** Stops the renewal thread; safe to call from any thread. */
    public void shutdown() {
        this.quit = true;
        this.interrupt();
    }

    /**
     * Renewal loop: every half renewal interval, collects all capabilities
     * that would expire within the next interval and renews them at the MRC.
     */
    public void run() {
        //check for CAP-renew
        do {
            List<CapEntry> renewList = new LinkedList<CapEntry>();
            synchronized (capabilities) {
                //collect caps expiring before the next renewal pass
                final long expTime = TimeSync.getLocalSystemTime()+CONSTANTS.XCAP_RENEW_INTERVAL_IN_MIN.getNumber()*60*1000;
                for (CapEntry e : capabilities.values()) {
                    if (e.getLocalTimestamp() <= expTime ) {
                        //needs renew!
                        renewList.add(e);
                    }
                }
            }
            // renew outside the lock to avoid blocking open/close during RPCs
            for (CapEntry cap : renewList) {
                renewCap(cap);
            }
            try {
                sleep(CONSTANTS.XCAP_RENEW_INTERVAL_IN_MIN.getNumber()*60*1000/2);
            } catch (InterruptedException ex) {
                // interrupted by shutdown()
                break;
            }
        } while (!quit);
    }

    /**
     * Renews a single capability at the MRC and propagates the new XCap to
     * all open file handles; failures are logged, not thrown.
     */
    protected void renewCap(CapEntry cap) {
        assert(cap != null);
        RPCResponse<XCap> r = null;
        try {
            r = client.xtreemfs_renew_capability(null, RPCAuthentication.authNone, RPCAuthentication.userService, cap.getCap());
            XCap newCap = r.get();
            synchronized (capabilities) {
                cap.updateCap(newCap, TimeSync.getLocalSystemTime()+CONSTANTS.XCAP_RENEW_INTERVAL_IN_MIN.getNumber()*60*1000-1000);
            }
        } catch (Exception ex) {
            Logging.logMessage(Logging.LEVEL_ERROR, this,"cannot renew cap due to exception");
            Logging.logError(Logging.LEVEL_ERROR, this, ex);
        } finally {
            if (r != null)
                r.freeBuffers();
        }
    }

    /** A capability plus its local expiry estimate and the open handles using it. */
    public static class CapEntry {

        private XCap cap;
        private long localTimestamp;
        final List<RandomAccessFile> files;

        public CapEntry(XCap c, long ts) {
            cap = c;
            localTimestamp = ts;
            files = new LinkedList<RandomAccessFile>();
        }

        public void addFile(RandomAccessFile file) {
            files.add(file);
        }

        /** Removes a handle; returns true if it was the last one. */
        public boolean removeFile(RandomAccessFile file) {
            files.remove(file);
            return files.isEmpty();
        }

        /** Replaces the capability and notifies all open handles. */
        public void updateCap(XCap c, long ts) {
            cap = c;
            localTimestamp = ts;
            for (RandomAccessFile file : files)
                file.updateCap(c);
        }

        /**
         * Upgrades the stored capability when the new one grants more
         * access: only from read-only to read/write, never the reverse.
         */
        public void upgradeCap(XCap c, long ts) {
            //FIXME
            /*
             * upgrade: always from R to RW
             * never from RW to R
             */
            if ( ((cap.getAccessMode() & SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDONLY.getNumber()) > 0)
                && ((c.getAccessMode() & SYSTEM_V_FCNTL.SYSTEM_V_FCNTL_H_O_RDWR.getNumber()) > 0) ) {
                updateCap(c, ts);
            }
        }

        /**
         * @return the cap
         */
        public XCap getCap() {
            return cap;
        }

        /**
         * @return the localTimestamp (local system time of estimated expiry)
         */
        public long getLocalTimestamp() {
            return localTimestamp;
        }
    }
}

View File

@@ -0,0 +1,89 @@
/*
* Copyright (c) 2009 by Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.internal;
import java.util.LinkedList;
import java.util.List;
import org.xtreemfs.common.xloc.Replica;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
/**
*
* @author bjko
*/
/**
 * RAID0 object mapper: splits a byte range into stripe-sized object
 * requests, resolving the responsible OSD per object via the replica.
 *
 * @author bjko
 */
public class RAID0ObjectMapper extends ObjectMapper {

    /** stripe size in bytes (the policy stores it in kB) */
    private final int stripeSize;

    protected RAID0ObjectMapper(StripingPolicy fileSP) {
        super(fileSP);
        stripeSize = fileSP.getStripeSize()*1024;
    }

    /**
     * Maps a read of {@code length} bytes at {@code fileOffset} onto one
     * request per touched object: a possibly partial first object, full
     * objects in between, and a possibly partial last object.
     */
    @Override
    public List<ObjectRequest> readRequest(int length, long fileOffset, Replica replica) {
        List<ObjectRequest> reqs = new LinkedList<ObjectRequest>();
        final long firstObj = fileOffset / stripeSize;
        final long lastObj = (fileOffset+length-1) / stripeSize;
        // FIX: the original wrote "(int)fileOffset%stripeSize" -- the cast
        // binds tighter than %, truncating the long offset to int BEFORE the
        // modulo and producing wrong offsets for file positions > 2^31-1
        final int firstOffset = (int) (fileOffset % stripeSize);
        if (firstObj == lastObj) {
            // the whole range lies within a single object
            ObjectRequest rq = new ObjectRequest(firstObj, firstOffset, length,
                    getOSDForObject(replica, firstObj), null);
            reqs.add(rq);
            return reqs;
        }
        //first (possibly partial) object
        ObjectRequest rq = new ObjectRequest(firstObj, firstOffset, stripeSize-firstOffset,
                getOSDForObject(replica, firstObj),null);
        reqs.add(rq);
        // full objects in between
        for (long o = firstObj+1; o < lastObj; o++) {
            rq = new ObjectRequest(o, 0, stripeSize,
                    getOSDForObject(replica, o), null);
            reqs.add(rq);
        }
        //last (possibly partial) object; same cast-before-modulo fix as above
        final int lastSize = ((fileOffset+length)%stripeSize == 0) ? stripeSize : (int) ((fileOffset+length)%stripeSize);
        if (lastSize > 0) {
            rq = new ObjectRequest(lastObj, 0, lastSize, getOSDForObject(replica, lastObj),
                    null);
            reqs.add(rq);
        }
        return reqs;
    }

    /**
     * Maps a write by reusing the read mapping and attaching to each request
     * a view buffer covering its slice of the data.
     */
    @Override
    public List<ObjectRequest> writeRequest(ReusableBuffer data, long fileOffset, Replica replica) {
        List<ObjectRequest> reqs = readRequest(data.remaining(), fileOffset, replica);
        int pCnt = 0;
        for (ObjectRequest rq : reqs) {
            ReusableBuffer viewBuf = data.createViewBuffer();
            viewBuf.range(pCnt, rq.getLength());
            pCnt += rq.getLength();
            rq.setData(viewBuf);
        }
        return reqs;
    }

    /** Resolves the OSD responsible for the given object in this replica. */
    protected String getOSDForObject(Replica replica, long objNo) {
        return replica.getOSDForObject(objNo).toString();
    }
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2008 by Nele Andersen, Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.io;
/**
 * Maps byte-oriented reads/writes at arbitrary file positions onto the
 * underlying object store of a striped file.
 */
public interface ByteMapper {
    /**
     * reads data from file.
     * @param data a buffer of length (length+offset) in which the data is stored
     * @param offset offset within buffer to write to
     * @param length number of bytes to read
     * @param filePosition offset within file
     * @return the number of bytes read (may be less than length at EOF)
     * @throws java.lang.Exception
     */
    public int read(byte[] data, int offset, int length, long filePosition) throws Exception;
    /**
     * writes data to a file.
     * @param data the data to write (buffer must be length+offset bytes long).
     * @param offset the position within the buffer to start at.
     * @param length number of bytes to write
     * @param filePosition the offset within the file
     * @return the number of bytes written
     * @throws java.lang.Exception
     */
    public int write(byte[] data, int offset, int length, long filePosition) throws Exception;
}

View File

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2008 by Nele Andersen,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.io;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicyType;
/**
 * Creates the ByteMapper implementation matching a numeric striping policy
 * id.
 */
public class ByteMapperFactory {

    /**
     * @param policy     numeric striping policy id (protobuf enum value)
     * @param stripeSize stripe size, passed through to the mapper unscaled
     * @param store      backend used to read/write individual objects
     * @return a ByteMapper for the given policy
     * @throws IllegalArgumentException if the policy id is unknown
     */
    public static ByteMapper createByteMapper(int policy, int stripeSize, ObjectStore store) {
        if( StripingPolicyType.valueOf(policy) == StripingPolicyType.STRIPING_POLICY_RAID0)
            return new ByteMapperRAID0(stripeSize, store);
        // include the offending id in the message to ease debugging
        throw new IllegalArgumentException("Unknown striping policy ID: " + policy);
    }
}

View File

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2008 by Nele Andersen, Bjoern Kolbeck,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.io;
import java.io.IOException;
import org.xtreemfs.foundation.buffer.BufferPool;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
/**
 * RAID0 byte mapper: translates byte-range reads/writes at arbitrary file
 * positions into per-object reads/writes against an ObjectStore, where a
 * file consists of consecutive objects of {@code stripeSize} bytes.
 */
class ByteMapperRAID0 implements ByteMapper {

    // stripe (object) size in bytes; every object except possibly the last
    // holds exactly this many bytes
    final int stripeSize;

    // backend used to read/write individual objects
    ObjectStore objectStore;

    public ByteMapperRAID0(int stripeSize, ObjectStore objectStore){
        this.stripeSize = stripeSize;
        this.objectStore = objectStore;
    }

    /**
     *
     * @param resultBuffer - the buffer into which the data is read.
     * @param offset - the start offset of the data.
     * @param bytesToRead - the maximum number of bytes read.
     * @return the total number of bytes read into the buffer; may be less
     *         than requested when EOF is reached.
     * @throws Exception
     * @throws IOException
     */
    public int read(byte[] data, int offset, int length, long filePosition) throws Exception{
        if (data.length < offset+length)
            throw new RuntimeException("buffer is too small!");
        // first/last object index touched by [filePosition, filePosition+length)
        final int firstObject = (int) (filePosition / this.stripeSize);
        assert(firstObject >= 0);
        int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize);
        if (( (filePosition + ((long)length)) % this.stripeSize) == 0)
            lastObject--;
        assert(lastObject >= firstObject);
        // byte offset within the first object
        final int offsetInFirstObject = (int) (filePosition % this.stripeSize);
        assert(offsetInFirstObject < stripeSize);
        // bytes covered in the last object (full stripe if the range ends
        // exactly on an object boundary)
        final int bytesInLastObject = (int) (((filePosition + length) % this.stripeSize) == 0 ? this.stripeSize :
            ((filePosition + length) % this.stripeSize));
        assert(bytesInLastObject > 0);
        assert(bytesInLastObject <= stripeSize);
        int bytesRead = 0;
        for (int obj = firstObject; obj <= lastObject; obj++) {
            // default: read a whole object; trimmed below for first/last
            int bytesToRead = this.stripeSize;
            int objOffset = 0;
            if (obj == firstObject) {
                objOffset = offsetInFirstObject;
                bytesToRead = this.stripeSize - objOffset;
            }
            if (obj == lastObject) {
                if (firstObject == lastObject) {
                    // single-object range: end relative to the object start
                    bytesToRead = bytesInLastObject-objOffset;
                } else {
                    bytesToRead = bytesInLastObject;
                }
            }
            assert(bytesToRead > 0);
            assert(objOffset >= 0);
            assert(objOffset < stripeSize);
            assert(objOffset+bytesToRead <= stripeSize);
            //System.out.println("read "+obj+" objOffset="+objOffset+" length="+bytesToRead);
            ReusableBuffer rb = objectStore.readObject(obj, objOffset, bytesToRead);
            assert(offset+bytesRead <= data.length);
            if (rb == null) {
                //EOF!
                break;
            }
            if (rb.capacity() < bytesToRead) {
                //EOF! short object: copy what fits and stop
                final int dataToRead = Math.min(rb.capacity(),data.length-offset-bytesRead);
                rb.get(data, offset+bytesRead,dataToRead);
                bytesRead += rb.capacity();
                BufferPool.free(rb);
                break;
            }
            //can get less data then requested!
            // NOTE(review): copies remaining() bytes but advances bytesRead by
            // capacity() -- these can differ; looks inconsistent, confirm
            // ReusableBuffer semantics before changing
            rb.get(data, offset+bytesRead, rb.remaining());
            bytesRead += rb.capacity();
            BufferPool.free(rb);
        }
        return bytesRead;
    }

    /**
     * Writes length bytes from data (starting at offset) to the file at
     * filePosition, splitting the range into per-object writes.
     * @return the number of bytes written
     */
    public int write(byte[] data, int offset, int length, long filePosition) throws Exception{
        // first/last object index touched by the write range
        final int firstObject = (int) (filePosition / this.stripeSize);
        int lastObject = (int) ( (filePosition + ((long)length)) / this.stripeSize);
        if (( (filePosition + ((long)length)) % this.stripeSize) == 0)
            lastObject--;
        final int offsetInFirstObject = (int) (filePosition % this.stripeSize);
        int bytesInLastObject = -1;
        if (firstObject == lastObject) {
            // whole write fits in one object
            bytesInLastObject = length;
        } else {
            if (((filePosition + length) % this.stripeSize) == 0) {
                bytesInLastObject = this.stripeSize;
                assert(bytesInLastObject >= 0);
            } else {
                bytesInLastObject = (int)((filePosition + length) % this.stripeSize);
                assert(bytesInLastObject >= 0);
            }
        }
        int bytesWritten = 0;
        for (int obj = firstObject; obj <= lastObject; obj++) {
            int bytesToWrite = this.stripeSize;
            int objOffset = 0;
            if (obj == firstObject) {
                bytesToWrite = this.stripeSize-offsetInFirstObject;
                objOffset = offsetInFirstObject;
            }
            if (obj == lastObject)
                bytesToWrite = bytesInLastObject;
            // wrap (no copy) the slice of the caller's array for this object
            ReusableBuffer view = ReusableBuffer.wrap(data, offset+bytesWritten, bytesToWrite);
            // ObjectStore.writeObject takes (offset, objectNo) in that order
            objectStore.writeObject(objOffset, obj, view);
            bytesWritten += bytesToWrite;
        }
        return bytesWritten;
    }
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2008 by Nele Andersen,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.clients.io;
import java.io.IOException;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
/**
 * Backend abstraction for reading and writing individual objects of a
 * striped file on an OSD.
 */
public interface ObjectStore {
    /**
     * read an object from an OSD.
     * @param objectNo object number (0 is the first object in a file)
     * @param offset offset within the object
     * @param length number of bytes to read
     * @return the data read. In case of an EOF the buffer's length will be smaller than requested!
     * @throws java.io.IOException
     * @throws java.lang.InterruptedException
     */
    ReusableBuffer readObject(long objectNo, int offset, int length) throws IOException,
        InterruptedException;
    /**
     * write (part of) an object on an OSD.
     * NOTE(review): parameter order is (offset, objectNo) -- the reverse of
     * readObject's (objectNo, offset); callers must take care.
     * @param offset offset within the object
     * @param objectNo object number (0 is the first object in a file)
     * @param buffer the data to write
     * @throws java.io.IOException
     * @throws java.lang.InterruptedException
     */
    void writeObject(long offset, long objectNo, ReusableBuffer buffer) throws IOException,
        InterruptedException;
}

View File

@@ -0,0 +1,970 @@
///* Copyright (c) 2008 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin.
//
//This file is part of XtreemFS. XtreemFS is part of XtreemOS, a Linux-based
//Grid Operating System, see <http://www.xtreemos.eu> for more details.
//The XtreemOS project has been developed with the financial support of the
//European Commission's IST program under contract #FP6-033576.
//
//XtreemFS is free software: you can redistribute it and/or modify it under
//the terms of the GNU General Public License as published by the Free
//Software Foundation, either version 2 of the License, or (at your option)
//any later version.
//
//XtreemFS is distributed in the hope that it will be useful,
//but WITHOUT ANY WARRANTY; without even the implied warranty of
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//GNU General Public License for more details.
//
//You should have received a copy of the GNU General Public License
//along with XtreemFS. If not, see <http://www.gnu.org/licenses/>.
// */
///*
// * AUTHORS: Nele Andersen (ZIB), Björn Kolbeck (ZIB), Christian Lorenz (ZIB)
// */
//package org.xtreemfs.common.clients.io;
//
//import java.io.IOException;
//import java.net.InetSocketAddress;
//import java.util.ArrayList;
//import java.util.Collections;
//import java.util.Iterator;
//import java.util.List;
//import java.util.concurrent.atomic.AtomicLong;
//
//import org.xtreemfs.common.uuids.ServiceUUID;
//import org.xtreemfs.common.xloc.InvalidXLocationsException;
//import org.xtreemfs.common.xloc.Replica;
//import org.xtreemfs.common.xloc.StripingPolicyImpl;
//import org.xtreemfs.common.xloc.XLocations;
//import org.xtreemfs.foundation.buffer.BufferPool;
//import org.xtreemfs.foundation.buffer.ReusableBuffer;
//import org.xtreemfs.foundation.logging.Logging;
//import org.xtreemfs.foundation.logging.Logging.Category;
//import org.xtreemfs.foundation.monitoring.Monitoring;
//import org.xtreemfs.foundation.monitoring.NumberMonitoring;
//import org.xtreemfs.foundation.oncrpc.client.RPCNIOSocketClient;
//import org.xtreemfs.foundation.oncrpc.client.RPCResponse;
//import org.xtreemfs.foundation.oncrpc.utils.ONCRPCException;
//import org.xtreemfs.interfaces.Constants;
//import org.xtreemfs.interfaces.FileCredentials;
//import org.xtreemfs.interfaces.FileCredentialsSet;
//import org.xtreemfs.interfaces.NewFileSize;
//import org.xtreemfs.interfaces.OSDWriteResponse;
//import org.xtreemfs.interfaces.ObjectData;
//import org.xtreemfs.interfaces.Stat;
//import org.xtreemfs.interfaces.StatSet;
//import org.xtreemfs.interfaces.StringSet;
//import org.xtreemfs.interfaces.StripingPolicy;
//import org.xtreemfs.interfaces.UserCredentials;
//import org.xtreemfs.interfaces.VivaldiCoordinates;
//import org.xtreemfs.interfaces.XCap;
//import org.xtreemfs.interfaces.MRCInterface.MRCInterface;
//import org.xtreemfs.interfaces.MRCInterface.setxattrResponse;
//import org.xtreemfs.interfaces.OSDInterface.OSDException;
//import org.xtreemfs.mrc.ac.FileAccessManager;
//import org.xtreemfs.mrc.client.MRCClient;
//import org.xtreemfs.osd.ErrorCodes;
//import org.xtreemfs.osd.client.OSDClient;
//
///**
// * @deprecated
// */
//public class RandomAccessFile implements ObjectStore {
// /**
// * resorts the replicas <br>
// * 12.05.2009
// */
// public abstract static class ReplicaSelectionPolicy {
// public abstract List<Replica> getReplicaOrder(List<Replica> replicas);
// }
//
// /**
// * policy randomizes the entries in list <br>
// * DEFAULT POLICY
// */
// public final ReplicaSelectionPolicy RANDOM_REPLICA_SELECTION_POLICY = new RandomAccessFile.ReplicaSelectionPolicy() {
// @Override
// public List<Replica> getReplicaOrder(
// List<Replica> replicas) {
// List<Replica> list = new ArrayList<Replica>(
// replicas);
// Collections
// .shuffle(list);
// return list;
// }
// };
//
// /**
// * policy rotates the entries in list (like round-robin)
// */
// public final ReplicaSelectionPolicy SEQUENTIAL_REPLICA_SELECTION_POLICY = new RandomAccessFile.ReplicaSelectionPolicy() {
// private int rotateValue = 0;
//
// @Override
// public List<Replica> getReplicaOrder(
// List<Replica> replicas) {
// List<Replica> list = new ArrayList<Replica>(
// replicas);
// Collections
// .rotate(
// list,
// rotateValue);
// rotateValue = 0 - (((0 - rotateValue) + 1) % list
// .size());
// return list;
// }
// };
//
// private static final int DEFAULT_CAP_VALIDITY = 60;
//
// private MRCClient mrcClient;
//
// private OSDClient osdClient;
//
// private FileCredentials fileCredentials;
//
// // all replicas have the same stripesize at the moment
// private StripingPolicyImpl stripingPolicy;
//
// private final String fileId;
//
// private final int mode;
//
// private final String volName;
//
// private final String pathName;
//
// private final InetSocketAddress mrcAddress;
//
// private ByteMapper byteMapper;
//
// private OSDWriteResponse wresp;
//
// private long filePos;
//
// private XLocations xLoc;
//
// private long capTime;
//
// private List<Replica> replicaOrder;
//
// private boolean isReadOnly;
//
// private final UserCredentials credentials;
//
// private ReplicaSelectionPolicy replicaSelectionPolicy;
//
// /*
// * monitoring stuff
// */
// private AtomicLong monitoringReadDataSizeInLastXs;
//
// private Thread monitoringThread = null;
//
// private NumberMonitoring monitoring;
//
// /**
// * Measures the throughput of the last 1 second.
// */
// public static final String MONITORING_KEY_THROUGHPUT_OF_LAST_X_SECONDS = "RAF: throughput of last X seconds (KB/s)";
//
// // /**
// // * Measures the throughput of the read data so far. Just the time required
// // * for the real network-transfer will be used.
// // */
// // public static final String MONITORING_KEY_THROUGHPUT =
// // "RAF: throughput of all read data so far (KB/s)";
//
// public static final int MONITORING_INTERVAL = 1000; // 10s
//
// public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName,
// RPCNIOSocketClient rpcClient, String userID, List<String> groupIDs) throws ONCRPCException,
// InterruptedException, IOException {
// this(mode, mrcAddress, pathName, rpcClient, MRCClient.getCredentials(userID, groupIDs));
// }
//
// public RandomAccessFile(String mode, InetSocketAddress mrcAddress, String pathName,
// RPCNIOSocketClient rpcClient, UserCredentials credentials) throws ONCRPCException,
// InterruptedException, IOException {
//
// this.mrcAddress = mrcAddress;
// this.mode = translateMode(mode);
//
// int index = pathName.indexOf('/');
// if (index == -1)
// throw new IOException("invalid path: " + pathName);
//
// this.volName = pathName.substring(0, index);
// this.pathName = pathName.substring(index + 1);
//
// assert (rpcClient != null);
//
// // use the shared speedy to create an MRC and OSD client
// mrcClient = new MRCClient(rpcClient, mrcAddress);
// osdClient = new OSDClient(rpcClient);
//
// this.credentials = credentials;
//
// // OSD selection
// this.replicaSelectionPolicy = RANDOM_REPLICA_SELECTION_POLICY;
//
// // create a new file if necessary
// RPCResponse<FileCredentials> r = mrcClient.open(mrcAddress, credentials, volName, this.pathName,
// FileAccessManager.O_CREAT, this.mode, 0, new VivaldiCoordinates());
// fileCredentials = r.get();
// r.freeBuffers();
//
// if (fileCredentials.getXlocs().getReplicas().size() == 0) {
// throw new IOException("cannot assign OSDs to file");
// }
//
// // all replicas have the same striping policy (more precisely the same
// // stripesize) at the moment
// stripingPolicy = StripingPolicyImpl.getPolicy(fileCredentials.getXlocs().getReplicas().get(0), 0);
// try {
// this.xLoc = new XLocations(fileCredentials.getXlocs(), null);
// } catch (InvalidXLocationsException ex) {
// // ignore
// }
//
// // always use first replica at beginning (original order)
// replicaOrder = this.replicaSelectionPolicy.getReplicaOrder(xLoc.getReplicas());
//
// byteMapper = ByteMapperFactory.createByteMapper(stripingPolicy.getPolicyId(), stripingPolicy
// .getStripeSizeForObject(0), this);
//
// capTime = System.currentTimeMillis();
//
// isReadOnly = fileCredentials.getXlocs().getReplica_update_policy().equals(
// Constants.REPL_UPDATE_PC_RONLY);
//
// fileId = fileCredentials.getXcap().getFile_id();
// wresp = null;
// filePos = 0;
//
// monitoring = new NumberMonitoring();
// monitoringReadDataSizeInLastXs = new AtomicLong(0);
// if (Monitoring.isEnabled()) {
// // enable statistics in client
// RPCNIOSocketClient.ENABLE_STATISTICS = true;
//
// monitoringThread = new Thread(new Runnable() {
// @Override
// public void run() {
// try {
// while (true) {
// if (Thread.interrupted())
// break;
// Thread.sleep(MONITORING_INTERVAL); // sleep
//
// long sizeInLastXs = monitoringReadDataSizeInLastXs.getAndSet(0);
// if (sizeInLastXs > 0) // log only interesting values
// monitoring.put(MONITORING_KEY_THROUGHPUT_OF_LAST_X_SECONDS,
// (sizeInLastXs / 1024d) / (MONITORING_INTERVAL / 1000d));
// }
// } catch (InterruptedException e) {
// // shutdown
// }
// }
// });
// monitoringThread.setDaemon(true);
// monitoringThread.start();
// }
// }
//
// private static int translateMode(String mode) {
// if (mode.equals("r"))
// return FileAccessManager.O_RDONLY;
// if (mode.startsWith("rw"))
// return FileAccessManager.O_RDWR;
// throw new IllegalArgumentException("invalid mode");
// }
//
// private void updateWriteResponse(OSDWriteResponse r) {
// if (r.getNew_file_size().size() > 0) {
// final NewFileSize nfs = r.getNew_file_size().get(0);
// if (wresp == null) {
// wresp = r;
// } else {
// final NewFileSize ofs = wresp.getNew_file_size().get(0);
// if ((nfs.getSize_in_bytes() > ofs.getSize_in_bytes())
// && (nfs.getTruncate_epoch() == ofs.getTruncate_epoch())
// || (nfs.getTruncate_epoch() > ofs.getTruncate_epoch())) {
// wresp = r;
// }
// }
// }
// }
//
// /**
// *
// * @param resultBuffer
// * - the buffer into which the data is read.
// * @param offset
// * - the start offset of the data.
// * @param bytesToRead
// * - the maximum number of bytes read.
// * @return - the total number of bytes read into the buffer, or -1 if there
// * is no more data because the end of the file has been reached.
// * @throws Exception
// * @throws IOException
// */
// public int read(byte[] resultBuffer, int offset, int bytesToRead) throws Exception {
//
// int tmp = byteMapper.read(resultBuffer, offset, bytesToRead, filePos);
// filePos += tmp;
// return tmp;
// }
//
// public ReusableBuffer readObject(long objectNo) throws IOException {
// return readObject(objectNo, 0, stripingPolicy.getStripeSizeForObject(objectNo));
// }
//
// /**
// *
// * @param objectNo
// * - relative object number.
// * @param firstByteInObject
// * - the first byte to be read.
// * @param bytesInObject
// * - the maximal number of bytes to be read.
// * @return a ReusableBuffer containing the data which was read.
// */
// @Override
// public ReusableBuffer readObject(long objectNo, int offset, int length) throws IOException {
//
// RPCResponse<ObjectData> response = null;
//
// int size = 0;
// ObjectData data = null;
// ReusableBuffer buffer = null;
// Iterator<Replica> iterator = this.replicaOrder.iterator();
// while (iterator.hasNext()) { // will be aborted, if object could be read
// Replica replica = iterator.next();
// // check whether capability needs to be renewed
// checkCap();
//
// // get OSD
// ServiceUUID osd = replica.getOSDForObject(objectNo);
// try {
// if (Logging.isDebug())
// Logging.logMessage(Logging.LEVEL_DEBUG, Category.tool, this,
// "%s:%d - read object from OSD %s", fileId, objectNo, osd);
//
// response = osdClient.read(osd.getAddress(), fileId, fileCredentials, objectNo, 0, offset,
// length);
// data = response.get();
//
// if (data.getInvalid_checksum_on_osd()) {
// // try next replica
// if (!iterator.hasNext()) { // all replicas had been tried
// throw new IOException("object " + objectNo + " has an invalid checksum");
// }
// }
//
// // fill up with padding zeros
// if (data.getZero_padding() == 0) {
// buffer = data.getData();
// } else {
// final int dataSize = data.getData().capacity();
// if (data.getData().enlarge(dataSize + data.getZero_padding())) {
// data.getData().position(dataSize);
// while (data.getData().hasRemaining())
// data.getData().put((byte) 0);
// buffer = data.getData();
// buffer.position(0);
// } else {
// buffer = BufferPool.allocate(dataSize + data.getZero_padding());
// buffer.put(data.getData());
// while (buffer.hasRemaining())
// buffer.put((byte) 0);
// buffer.position(0);
// BufferPool.free(data.getData());
// }
// }
//
// // // monitor data for throughput
// // if (Monitoring.isEnabled()) {
// // monitoring.putAverage(MONITORING_KEY_THROUGHPUT,
// // (buffer.limit() / 1024d)
// // / (response.getDuration() / 1000000000d));
// // monitoringReadDataSizeInLastXs.addAndGet(buffer.limit());
// // }
//
// break;
//
// } catch (OSDException ex) {
// if (buffer != null)
// BufferPool.free(buffer);
// // all replicas had been tried or replication has been failed
// if (ex instanceof OSDException)
// if (iterator.hasNext() || ((OSDException) ex).getError_code() != ErrorCodes.IO_ERROR)
// continue;
// throw new IOException("cannot read object", ex);
// } catch (ONCRPCException ex) {
// if (buffer != null)
// BufferPool.free(buffer);
// // all replicas had been tried or replication has been failed
// throw new IOException("cannot read object: " + ex.getMessage(), ex);
// } catch (IOException ex) {
// if (buffer != null)
// BufferPool.free(buffer);
// // all replicas had been tried
// if (!iterator.hasNext()) {
// throw new IOException("cannot read object", ex);
// }
// } catch (InterruptedException ex) {
// // ignore
// } finally {
// if (response != null) {
// response.freeBuffers();
// }
// }
// }
// return buffer;
// }
//
// /**
// *
// * @param objectNo
// * @return
// * @throws IOException
// */
// public int checkObject(long objectNo) throws IOException {
// checkCap();
//
// RPCResponse<ObjectData> response = null;
//
// int size = 0;
// ObjectData data = null;
// ReusableBuffer buffer = null;
// Iterator<Replica> iterator = this.replicaOrder.iterator();
// while (iterator.hasNext()) { // will be aborted, if object could be read
// Replica replica = iterator.next();
// try {
// // get OSD
// ServiceUUID osd = replica.getOSDForObject(objectNo);
//
// response = osdClient.check_object(osd.getAddress(), fileId, fileCredentials, objectNo, 0);
// data = response.get();
//
// if (data.getInvalid_checksum_on_osd()) {
// // try next replica
// if (!iterator.hasNext()) { // all replicas had been tried
// throw new IOException("object " + objectNo + " has an invalid checksum");
// }
// }
//
// size = data.getZero_padding();
//
// break;
// } catch (ONCRPCException ex) {
// if (buffer != null)
// BufferPool.free(buffer);
// // all replicas had been tried or replication has been failed
// if (!iterator.hasNext() || ((OSDException) ex).getError_code() == ErrorCodes.IO_ERROR) {
// throw new IOException("cannot read object", ex);
// }
// } catch (IOException ex) {
// if (buffer != null)
// BufferPool.free(buffer);
// // all replicas had been tried
// if (!iterator.hasNext()) {
// throw new IOException("cannot read object", ex);
// }
// } catch (InterruptedException ex) {
// // ignore
// } finally {
// if (response != null) {
// response.freeBuffers();
// }
// if ((data != null) && (data.getData() != null)) {
// BufferPool.free(data.getData());
// data.setData(null);
// }
// }
// }
// return size;
// }
//
// /**
// * Writes bytesToWrite bytes from the writeFromBuffer byte array starting at
// * offset to this file.
// *
// * @param writeFromBuffer
// * @param offset
// * @param bytesToWrite
// * @return the number of bytes written
// * @throws Exception
// */
// public int write(byte[] writeFromBuffer, int offset, int bytesToWrite) throws Exception {
//
// int tmp = byteMapper.write(writeFromBuffer, offset, bytesToWrite, filePos);
// filePos += bytesToWrite;
// return tmp;
// }
//
// /**
// * Writes...
// *
// * @param firstByteInObject
// * - the start offset in the file
// * @param objectNo
// * - the relative object number
// * @param data
// * - the data to be written.....
// */
// public void writeObject(long firstByteInObject, long objectNo, ReusableBuffer data) throws IOException {
//
// // check whether capability needs to be renewed
// checkCap();
//
// if (isReadOnly)
// throw new IOException("File is marked as read-only. You cannot write anymore.");
//
// RPCResponse<OSDWriteResponse> response = null;
// try {
// // uses always first replica
// ServiceUUID osd = replicaOrder.get(0).getOSDs().get(stripingPolicy.getOSDforObject(objectNo));
// ObjectData odata = new ObjectData(0, false, 0, data);
// response = osdClient.write(osd.getAddress(), fileId, fileCredentials, objectNo, 0,
// (int) firstByteInObject, 0, odata);
// OSDWriteResponse owr = response.get();
// this.updateWriteResponse(owr);
// } catch (ONCRPCException ex) {
// throw new IOException("cannot write object: " + ex.getMessage(), ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot write object", ex);
// } finally {
// if (response != null)
// response.freeBuffers();
// }
//
// }
//
// public void flush() throws IOException {
// if (wresp != null) {
// RPCResponse r = null;
// try {
// long fs = wresp.getNew_file_size().get(0).getSize_in_bytes();
// int ep = wresp.getNew_file_size().get(0).getTruncate_epoch();
// r = mrcClient.fsetattr(mrcAddress, fileCredentials.getXcap(), new Stat(0, 0, 0, 0, "", "",
// fs, 0, 0, 0, 0, 0, ep, 0), MRCInterface.SETATTR_SIZE);
// r.get();
// wresp = null;
// } catch (ONCRPCException ex) {
// throw new IOException("cannot write object", ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot write object", ex);
// } finally {
// if (r != null)
// r.freeBuffers();
// }
// }
// }
//
// public void delete() throws Exception {
// checkCap();
//
// if (fileCredentials.getXlocs().getReplicas().size() == 1) {
// RPCResponse<FileCredentialsSet> r = null;
// RPCResponse delR = null;
// try {
// r = mrcClient.unlink(mrcAddress, credentials, volName, pathName);
// FileCredentialsSet fcreds = r.get();
// if (fcreds.size() > 0) {
// // must delete on OSDs too!
// final FileCredentials delCred = fcreds.get(0);
// // uses always first replica
// delR = osdClient.unlink(replicaOrder.get(0).getHeadOsd().getAddress(), fileId, delCred);
// delR.get();
// delR.freeBuffers();
// }
// } catch (ONCRPCException ex) {
// throw new IOException("cannot write object", ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot write object", ex);
// } finally {
// if (r != null)
// r.freeBuffers();
// }
// } else {
// throw new IOException("There is more than 1 replica existing. Delete all replicas first.");
// }
// }
//
// public long length() throws IOException {
// RPCResponse<StatSet> r = null;
// try {
// r = mrcClient.getattr(mrcAddress, credentials, volName, pathName);
// Stat statInfo = r.get().get(0);
//
// // decide what to use...
// if (wresp != null) {
// final NewFileSize localFS = wresp.getNew_file_size().get(0);
//
// // check if we know a larger file size locally
// if (localFS.getTruncate_epoch() < statInfo.getTruncate_epoch())
// return statInfo.getSize();
// if (localFS.getSize_in_bytes() > statInfo.getSize())
// return localFS.getSize_in_bytes();
// }
// return statInfo.getSize();
// } catch (ONCRPCException ex) {
// throw new IOException("cannot write object", ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot write object", ex);
// } finally {
// if (r != null)
// r.freeBuffers();
// }
//
// }
//
// public void close() throws IOException {
// flush();
//
// // shutdown
// if (monitoringThread != null)
// monitoringThread.interrupt();
// }
//
// /**
// * Sets the file read-only and changes the access mode to "r", if mode is
// * "true". Sets the file writable and changes the access mode to the
// * original mode, if mode is "false" and no replicas exist.
// *
// * @param mode
// * @throws Exception
// */
// public void setReadOnly(boolean mode) throws Exception {
// if (isReadOnly == mode)
// return;
//
// try {
// if (mode) {
// flush();
//
// // set read only
// RPCResponse<setxattrResponse> r = mrcClient.setxattr(mrcAddress, credentials, volName,
// pathName, "xtreemfs.read_only", "true", 0);
// r.get();
// r.freeBuffers();
//
// forceXCapUpdate();
//
// // get correct filesize
// RPCResponse<Long> r2 = osdClient.internal_get_file_size(replicaOrder.get(0).getHeadOsd()
// .getAddress(), fileId, fileCredentials);
// long filesize = r2.get();
// r2.freeBuffers();
//
// // set filesize on mrc
// forceFileSize(filesize);
//
// forceFileCredentialsUpdate(translateMode("r"));
// } else {
// if (fileCredentials.getXlocs().getReplicas().size() > 1)
// throw new IOException("File has still replicas.");
// else {
// // set read only
// RPCResponse<setxattrResponse> r = mrcClient.setxattr(mrcAddress, credentials, volName,
// pathName, "xtreemfs.read_only", "false", 0);
// r.get();
// r.freeBuffers();
//
// forceFileCredentialsUpdate(this.mode);
// }
// }
// } catch (ONCRPCException ex) {
// throw new IOException("Cannot change objects read-only-state.", ex);
// } catch (InterruptedException ex) {
// throw new IOException("Cannot change objects read-only-state.", ex);
// }
// }
//
// /**
// * adds a replica for this file
// *
// * @param osds
// * @param spPolicy
// * @param replicationFlags
// * @throws Exception
// */
// public void addReplica(List<ServiceUUID> osds, StripingPolicy spPolicy, int replicationFlags)
// throws Exception {
// // check correct parameters
// if (spPolicy.getStripe_size() != stripingPolicy.getPolicy().getStripe_size())
// throw new IllegalArgumentException("New replica must have a stripe size of "
// + stripingPolicy.getPolicy().getStripe_size() + " (given value is "
// + spPolicy.getStripe_size() + ").");
// if (osds.size() != spPolicy.getWidth())
// throw new IllegalArgumentException("Too many or less OSDs in list.");
// for (ServiceUUID osd : osds) {
// if (xLoc.containsOSD(osd)) // OSD is used for any replica so far
// throw new IllegalArgumentException(
// "At least one OSD from list is already used for this file.");
// }
//
// if (isReadOnly) {
// StringSet osdSet = new StringSet();
// for (ServiceUUID osd : osds) {
// osdSet.add(osd.toString());
// }
//
// org.xtreemfs.interfaces.Replica newReplica = new org.xtreemfs.interfaces.Replica(osdSet,
// replicationFlags, spPolicy);
// RPCResponse r = mrcClient.xtreemfs_replica_add(mrcAddress, credentials, fileId, newReplica);
// r.get();
// r.freeBuffers();
//
// forceFileCredentialsUpdate(translateMode("r"));
//
// replicaOrder = replicaSelectionPolicy.getReplicaOrder(xLoc.getReplicas());
// } else
// throw new IOException("File is not marked as read-only.");
// }
//
// /**
// * removes a replica for this file
// *
// * @param replica
// * @throws Exception
// */
// public void removeReplica(Replica replica) throws Exception {
// removeReplica(replica.getHeadOsd());
// }
//
// /**
// * removes a replica for this file
// *
// * @param osd
// * @throws Exception
// */
// public void removeReplica(ServiceUUID osd) throws Exception {
// if (isReadOnly) {
// if (fileCredentials.getXlocs().getReplicas().size() < 2)
// throw new IOException("Cannot remove last replica.");
//
// boolean otherCompleteReplicaExists = false;
// if (xLoc.getReplica(osd).isComplete()) { // complete replica
// // check if another replica is also complete
// for (Replica r : xLoc.getReplicas())
// if (r.isComplete() && !r.equals(xLoc.getReplica(osd))) {
// otherCompleteReplicaExists = true;
// break;
// }
// if (!otherCompleteReplicaExists)
// throw new IOException(
// "This is the last remaining COMPLETE replica. It cannot be removed,"
// + " otherwise it can happen that the file will be destroyed.");
// }
//
// RPCResponse<XCap> r = mrcClient.xtreemfs_replica_remove(mrcAddress, credentials, fileId, osd
// .toString());
// XCap deleteCap = r.get();
// r.freeBuffers();
//
// RPCResponse r2 = osdClient.unlink(osd.getAddress(), fileId, new FileCredentials(deleteCap,
// fileCredentials.getXlocs()));
// r2.get();
// r2.freeBuffers();
//
// forceFileCredentialsUpdate(translateMode("r"));
//
// replicaOrder = replicaSelectionPolicy.getReplicaOrder(xLoc.getReplicas());
// } else
// throw new IOException("File is not marked as read-only.");
// }
//
// /**
// * removes "all" replicas, so that only one (the first) replica exists
// */
// public void removeAllReplicas() throws Exception {
// List<Replica> replicas = xLoc.getReplicas();
// for (int i = 1; i < replicas.size(); i++) {
// removeReplica(replicas.get(i));
// }
// }
//
// /**
// * returns suitable OSDs which can be used for a replica of this file
// *
// * @return
// * @throws Exception
// */
// public List<ServiceUUID> getSuitableOSDsForAReplica() throws Exception {
//
// assert (xLoc.getNumReplicas() > 0);
//
// RPCResponse<StringSet> r = mrcClient.xtreemfs_get_suitable_osds(mrcAddress, fileId, xLoc
// .getReplica(0).getOSDs().size());
// StringSet osds = r.get();
// r.freeBuffers();
//
// ArrayList<ServiceUUID> osdList = new ArrayList<ServiceUUID>();
// for (String osd : osds) {
// ServiceUUID uuid = new ServiceUUID(osd);
// osdList.add(uuid);
// }
// return osdList;
// }
//
// public void setReplicaSelectionPolicy(ReplicaSelectionPolicy policy) {
// replicaSelectionPolicy = policy;
// replicaOrder = replicaSelectionPolicy.getReplicaOrder(xLoc.getReplicas());
// stripingPolicy = replicaOrder.get(0).getStripingPolicy();
// }
//
// // useful for tests
// public void changeReplicaOrder() {
// replicaOrder = replicaSelectionPolicy.getReplicaOrder(xLoc.getReplicas());
// stripingPolicy = replicaOrder.get(0).getStripingPolicy();
// }
//
// /**
// * returns the StripingPolicy of the first replica (but at the moment it is
// * the same for all replicas)
// *
// * @return
// */
// public StripingPolicy getStripingPolicy() {
// return stripingPolicy.getPolicy();
// }
//
// /**
// * returns the stripe size of used replica in bytes
// *
// * @return
// */
// public long getStripeSize() {
// // the stripe size of a file is constant.
// return stripingPolicy.getPolicy().getStripe_size();
// }
//
// public XLocations getXLoc() {
// return xLoc;
// }
//
// public boolean isReadOnly() {
// return isReadOnly;
// }
//
// public Replica getCurrentlyUsedReplica() {
// return replicaOrder.get(0);
// }
//
// public long noOfObjects() throws Exception {
// // all replicas have the same striping policy (more precisely the same
// // stripesize) at the moment
// return stripingPolicy.getObjectNoForOffset(length() - 1);
// }
//
// public String getFileId() {
// return fileId;
// }
//
// public String getPath() {
// return volName + "/" + pathName;
// }
//
// public void seek(long pos) {
// filePos = pos;
// }
//
// public long getFilePointer() {
// return filePos;
// }
//
// public String getStripingPolicyAsString() {
// return fileCredentials.getXlocs().getReplicas().get(0).toString();
// }
//
// public FileCredentials getCredentials() {
// return this.fileCredentials;
// }
//
// private void checkCap() throws IOException {
//
// long time = System.currentTimeMillis();
//
// if (time - capTime > (DEFAULT_CAP_VALIDITY - 60) * 1000) {
// try {
// forceXCapUpdate();
// } catch (Exception e) {
// throw new IOException(e);
// }
// }
// }
//
// private void forceXCapUpdate() throws IOException {
// // update Xcap
// try {
// RPCResponse<XCap> r = mrcClient.xtreemfs_renew_capability(mrcAddress, fileCredentials.getXcap());
// XCap cap = r.get();
// r.freeBuffers();
//
// fileCredentials.setXcap(cap);
//
// capTime = System.currentTimeMillis();
// } catch (Exception e) {
// throw new IOException(e);
// }
// }
//
// /**
// * @throws ONCRPCException
// * @throws IOException
// * @throws InterruptedException
// */
// private void forceFileCredentialsUpdate(int mode) throws ONCRPCException, IOException,
// InterruptedException {
// try {
// RPCResponse<FileCredentials> r = mrcClient.open(mrcAddress, credentials, volName, pathName,
// FileAccessManager.O_CREAT, mode, 0, new VivaldiCoordinates());
// fileCredentials = r.get();
// r.freeBuffers();
// xLoc = new XLocations(fileCredentials.getXlocs(), null);
// isReadOnly = fileCredentials.getXlocs().getReplica_update_policy().equals(
// Constants.REPL_UPDATE_PC_RONLY);
// } catch (InvalidXLocationsException ex) {
// throw new IOException(ex);
// }
// }
//
// public void forceFileSize(long newFileSize) throws IOException {
// RPCResponse r = null;
// try {
// r = mrcClient.fsetattr(mrcAddress, fileCredentials.getXcap(), new Stat(0, 0, 0, 0, "", "",
// newFileSize, 0, 0, 0, 0, 0, fileCredentials.getXcap().getTruncate_epoch(), 0),
// MRCInterface.SETATTR_SIZE);
// r.get();
// } catch (ONCRPCException ex) {
// throw new IOException("cannot update file size", ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot update file size", ex);
// } finally {
// if (r != null)
// r.freeBuffers();
// }
// }
//
// public Stat stat() throws IOException {
// RPCResponse<StatSet> r = null;
// try {
// r = mrcClient.getattr(mrcAddress, credentials, volName, pathName);
// return r.get().get(0);
// } catch (ONCRPCException ex) {
// throw new IOException("cannot update file size", ex);
// } catch (InterruptedException ex) {
// throw new IOException("cannot update file size", ex);
// } finally {
// if (r != null)
// r.freeBuffers();
// }
// }
//
// public NumberMonitoring getMonitoringInfo() {
// return monitoring;
// }
//}

View File

@@ -0,0 +1,171 @@
/*
* Copyright (c) 2008-2011 by Christian Lorenz, Bjoern Kolbeck,
* Jan Stender, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.config;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.Properties;
import org.xtreemfs.foundation.logging.Logging;
/**
*
* @author bjko
*/
abstract public class Config {

    /** Backing store for all configuration parameters. */
    protected final Properties props;

    /** Creates an empty configuration. */
    public Config() {
        props = new Properties();
    }

    /**
     * Creates a configuration that uses the given properties as defaults.
     *
     * @param prop the properties to use as defaults
     */
    public Config(Properties prop) {
        this.props = new Properties(prop);
    }

    /**
     * Creates a new instance of {@link Config} by loading the properties from
     * the given file.
     *
     * @param filename path of the properties file to load
     * @throws IOException if the file cannot be opened or parsed
     */
    public Config(String filename) throws IOException {
        props = new Properties();
        FileInputStream in = new FileInputStream(filename);
        try {
            props.load(in);
        } finally {
            // always release the file handle (the stream was previously leaked)
            in.close();
        }
    }

    /**
     * Writes out a properties-compatible file at the given location.
     *
     * @param filename path of the file to write
     * @throws IOException if writing fails
     * @throws FileNotFoundException if the file cannot be created
     */
    protected void write(String filename) throws FileNotFoundException, IOException {
        FileOutputStream out = new FileOutputStream(filename);
        try {
            props.store(out, "");
        } finally {
            // always release the file handle (the stream was previously leaked)
            out.close();
        }
    }

    /**
     * Reads a mandatory integer parameter.
     *
     * @param paramName the property name
     * @return the parsed integer value
     * @throws RuntimeException if the property is missing or not a valid integer
     */
    protected int readRequiredInt(String paramName) {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            throw new RuntimeException("property '" + paramName
                + "' is required but was not found");
        try {
            return Integer.parseInt(tmp.trim());
        } catch (NumberFormatException ex) {
            throw new RuntimeException("property '" + paramName
                + "' is an integer but '" + tmp + "' is not a valid number");
        }
    }

    /**
     * Reads a mandatory string parameter.
     *
     * @param paramName the property name
     * @return the trimmed property value
     * @throws RuntimeException if the property is missing
     */
    protected String readRequiredString(String paramName) {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            throw new RuntimeException("property '" + paramName
                + "' is required but was not found");
        return tmp.trim();
    }

    /**
     * Reads a mandatory host/port pair as a socket address.
     *
     * @param hostParam the property name holding the host name
     * @param portParam the property name holding the port number
     * @return the resolved socket address
     * @throws RuntimeException if either property is missing or the port is not an integer
     */
    protected InetSocketAddress readRequiredInetAddr(String hostParam,
        String portParam) {
        String host = readRequiredString(hostParam);
        int port = readRequiredInt(portParam);
        InetSocketAddress isa = new InetSocketAddress(host, port);
        return isa;
    }

    /**
     * Reads a mandatory boolean parameter.
     *
     * @param paramName the property name
     * @return {@code true} iff the trimmed value equals "true" (case-insensitive)
     * @throws RuntimeException if the property is missing
     */
    protected boolean readRequiredBoolean(String paramName) {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            throw new RuntimeException("property '" + paramName
                + "' is required but was not found");
        return Boolean.parseBoolean(tmp.trim());
    }

    /**
     * Reads an optional boolean parameter.
     *
     * @param paramName    the property name
     * @param defaultValue the value to return if the property is absent
     * @return the parsed value, or {@code defaultValue} if the property is absent
     */
    protected boolean readOptionalBoolean(String paramName, boolean defaultValue) {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            return defaultValue;
        else
            return Boolean.parseBoolean(tmp.trim());
    }

    /**
     * Reads an optional integer parameter.
     *
     * @param paramName    the property name
     * @param defaultValue the value to return if the property is absent
     * @return the parsed value, or {@code defaultValue} if the property is absent
     * @throws NumberFormatException if the property is present but not a valid integer
     */
    protected int readOptionalInt(String paramName, int defaultValue) {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            return defaultValue;
        else
            return Integer.parseInt(tmp.trim());
    }

    /**
     * Reads an optional host name parameter and resolves it.
     *
     * @param paramName    the property name
     * @param defaultValue the address to return if the property is absent
     * @return the resolved address, or {@code defaultValue} if the property is absent
     * @throws UnknownHostException if the host name cannot be resolved
     */
    protected InetAddress readOptionalInetAddr(String paramName,
        InetAddress defaultValue) throws UnknownHostException {
        String tmp = props.getProperty(paramName);
        if (tmp == null)
            return defaultValue;
        else
            return InetAddress.getByName(tmp);
    }

    /**
     * Reads an optional host/port pair as a socket address. The default is
     * returned unless BOTH host and port are present.
     *
     * @param hostName     the property name holding the host name
     * @param portParam    the property name holding the port number
     * @param defaultValue the address to return if host or port is absent
     * @return the socket address, or {@code defaultValue}
     */
    protected InetSocketAddress readOptionalInetSocketAddr(String hostName,
        String portParam, InetSocketAddress defaultValue) {
        String host = readOptionalString(hostName, null);
        int port = readOptionalInt(portParam, -1);
        if (host == null || port == -1)
            return defaultValue;
        else
            return new InetSocketAddress(host, port);
    }

    /**
     * Reads an optional string parameter.
     *
     * @param paramName    the property name
     * @param defaultValue the value to return if the property is absent
     * @return the property value (NOT trimmed), or {@code defaultValue}
     */
    protected String readOptionalString(String paramName, String defaultValue) {
        return props.getProperty(paramName, defaultValue);
    }

    /**
     * Reads the optional "debug.level" property and maps it to one of the
     * {@link Logging} level constants. Accepted symbolic names are EMERG,
     * ALERT, CRIT, ERR (note: not "ERROR"), WARNING, NOTICE, INFO and DEBUG
     * (case-insensitive); any other value must be a plain integer level.
     *
     * @return the configured level, or {@link Logging#LEVEL_WARN} if absent
     * @throws RuntimeException if the value is neither a known name nor an integer
     */
    protected int readOptionalDebugLevel() {
        String level = props.getProperty("debug.level");
        if (level == null)
            return Logging.LEVEL_WARN;
        else {
            level = level.trim().toUpperCase();
            if (level.equals("EMERG")) {
                return Logging.LEVEL_EMERG;
            } else if (level.equals("ALERT")) {
                return Logging.LEVEL_ALERT;
            } else if (level.equals("CRIT")) {
                return Logging.LEVEL_CRIT;
            } else if (level.equals("ERR")) {
                return Logging.LEVEL_ERROR;
            } else if (level.equals("WARNING")) {
                return Logging.LEVEL_WARN;
            } else if (level.equals("NOTICE")) {
                return Logging.LEVEL_NOTICE;
            } else if (level.equals("INFO")) {
                return Logging.LEVEL_INFO;
            } else if (level.equals("DEBUG")) {
                return Logging.LEVEL_DEBUG;
            } else {
                try {
                    return Integer.parseInt(level);
                } catch (NumberFormatException ex) {
                    throw new RuntimeException("'" + level +
                        "' is not a valid level name nor an integer");
                }
            }
        }
    }

    /**
     * Returns the backing properties object.
     *
     * @return the internal {@link Properties} instance (live, not a copy)
     */
    public Properties getProps() {
        return props;
    }
}

View File

@@ -0,0 +1,444 @@
/*
* Copyright (c) 2008-2011 by Jan Stender,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.config;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.tools.JavaCompiler;
import javax.tools.JavaFileObject;
import javax.tools.StandardJavaFileManager;
import javax.tools.ToolProvider;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.util.FSUtils;
import org.xtreemfs.foundation.util.OutputUtils;
/**
* A class loader capable of loading policy classes from a given policy
* directory.
* <p>
*
* It allows for an efficient retrieval of policies by means of a policy ID and
* interface. Three requirements have to be met:
* <ul>
* <li>the policy needs to be assignable from any of the given policy interfaces
* <li>the policy either needs to be in the list of built-in policies, or in the
* plug-in directory together with all its dependencies.
* <li>the policy needs to define a field
* <code>public static final long POLICY_ID</code>, which will be used as the ID
* for a later retrieval
* </ul>
*
* <p>
* The class loader distinguishes between <i>built-in</i> and <i>plug-in</i>
* policies. Built-in policies are pre-defined and always available. Plug-in
* policies are compiled, if necessary, and dynamically loaded from the given
* policy directory.
*
* @author stender
*
*/
public class PolicyClassLoader extends ClassLoader {
/** cache of all policy classes loaded so far, keyed by fully-qualified class name */
private final Map<String, Class> cache;

/**
 * policy lookup table: maps each policy interface to a map from policy ID
 * (the class' public static final long POLICY_ID field, see class Javadoc)
 * to the implementing class
 */
private final Map<Class, Map<Long, Class>> policyMap;

/** the resolved policy interface classes, as named in the constructor args */
private final Class[] policyInterfaces;

/** the resolved built-in policy classes, as named in the constructor args */
private final Class[] builtInPolicies;

/** directory holding plug-in policy sources/classes/JARs; null if not configured */
private File policyDir;

/** all *.jar files found in the policy directory; populated by init() */
private File[] jarFiles;
/**
* Instantiates a new policy class loader.
*
* @param policyDirPath
* the path for the directory with all compiled and uncompiled
* policies
* @param policyInterfaceNames
* the names of all policy interfaces
* @param builtInPolicyNames
* the names of all built-in policies
* @throws IOException
* if an error occurs while initializing the policy interfaces
* or built-in policies
*/
public PolicyClassLoader(String policyDirPath, String[] policyInterfaceNames, String[] builtInPolicyNames)
throws IOException {
this.cache = new HashMap<String, Class>();
this.policyMap = new HashMap<Class, Map<Long, Class>>();
try {
// load policy interfaces
policyInterfaces = new Class[policyInterfaceNames.length];
for (int i = 0; i < policyInterfaceNames.length; i++)
policyInterfaces[i] = Class.forName(policyInterfaceNames[i]);
// load built-in policies
builtInPolicies = new Class[builtInPolicyNames.length];
for (int i = 0; i < builtInPolicyNames.length; i++)
builtInPolicies[i] = Class.forName(builtInPolicyNames[i]);
} catch (ClassNotFoundException exc) {
throw new IOException("could not initialize policy class loader:", exc);
}
// initialize the policy dir file if defined
if (policyDirPath != null)
policyDir = new File(policyDirPath);
}
/**
* Initializes the class loader. This first causes all source code in the
* policy directory to be compiled. In a second step, each class in the
* directory is loaded and checked for assignability to one of the given
* policy interfaces. If the check is successful, the class is added to a
* map, from which it can be efficiently retrieved by means of a policy ID.
*
* @throws IOException
* if an I/O error occurs while compiling or loading any of the
* classes
*/
public void init() throws IOException {
    // Phase 1: plug-in policies — compile any Java sources found in the
    // policy directory, then load and register every resulting class file.
    if (policyDir != null && policyDir.exists()) {
        // get all JAR files (recorded for the compilation classpath below)
        jarFiles = policyDir.listFiles(new FileFilter() {
            @Override
            public boolean accept(File pathname) {
                return pathname.getAbsolutePath().endsWith(".jar");
            }
        });
        // get all Java files recursively
        File[] javaFiles = FSUtils.listRecursively(policyDir, new FileFilter() {
            @Override
            public boolean accept(File pathname) {
                return pathname.getAbsolutePath().endsWith(".java");
            }
        });
        // compile all Java files with the runtime's system compiler,
        // using the current classpath plus all policy-directory JARs
        if (javaFiles.length != 0) {
            // NOTE(review): ':' is the Unix classpath separator — this would
            // break on Windows (File.pathSeparator); confirm Unix-only is intended
            String cp = System.getProperty("java.class.path") + ":";
            for (int i = 0; i < jarFiles.length; i++) {
                cp += jarFiles[i];
                if (i != jarFiles.length - 1)
                    cp += ":";
            }
            List<String> options = new ArrayList<String>(1);
            options.add("-cp");
            options.add(cp);
            JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
            if (compiler == null) {
                // running on a JRE without compiler support — warn and skip compilation
                Logging.logMessage(
                    Logging.LEVEL_WARN,
                    Category.misc,
                    this,
                    "No Java compiler was found to compile additional policies. Make sure that a Java development environment is installed on your system.");
            } else {
                StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null);
                Iterable<? extends JavaFileObject> compilationUnits = fileManager
                    .getJavaFileObjectsFromFiles(Arrays.asList(javaFiles));
                // getTask(...).call() returns false if any unit failed to compile;
                // failures are logged but do not abort initialization
                if (!compiler.getTask(null, fileManager, null, options, null, compilationUnits).call())
                    Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this,
                        "some policies in '%s' could not be compiled", policyDir.getAbsolutePath());
                fileManager.close();
            }
        }
        // retrieve all policies from class files
        File[] classFiles = FSUtils.listRecursively(policyDir, new FileFilter() {
            @Override
            public boolean accept(File pathname) {
                return pathname.getAbsolutePath().endsWith(".class");
            }
        });
        for (File cls : classFiles) {
            try {
                // derive the fully-qualified class name from the path relative
                // to the policy directory, e.g. "a/b/C.class" -> "a.b.C"
                // NOTE(review): replaces '/' literally, not File.separatorChar —
                // Unix-specific; confirm intended
                String className = cls.getAbsolutePath().substring(
                    policyDir.getAbsolutePath().length() + 1,
                    cls.getAbsolutePath().length() - ".class".length()).replace('/', '.');
                if (cache.containsKey(className))
                    continue;
                // load the class
                // NOTE(review): the FileInputStream is handed to loadFromStream() —
                // confirm it is closed there, otherwise a handle leaks per class
                Class clazz = loadFromStream(new FileInputStream(cls));
                // check whether the class refers to a policy; if so,
                // cache it
                checkClass(clazz);
            } catch (LinkageError err) {
                // ignore linkage errors
            } catch (Exception exc) {
                Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this,
                    "an error occurred while trying to load class from file " + cls);
                Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this, OutputUtils
                    .stackTraceToString(exc));
            }
        }
        // retrieve all policies from JAR files
        // for (File jar : jarFiles) {
        //
        // JarFile arch = new JarFile(jar);
        //
        // Enumeration<JarEntry> entries = arch.entries();
        // while (entries.hasMoreElements()) {
        // JarEntry entry = entries.nextElement();
        // if (entry.getName().endsWith(".class")) {
        //
        // try {
        //
        // // load the class
        // Class clazz = loadFromStream(arch.getInputStream(entry));
        //
        // // check whether the class refers to a policy; if
        // // so, cache it
        // checkClass(clazz);
        //
        // } catch (IOException exc) {
        // Logging.logMessage(Logging.LEVEL_WARN, this, "could not load
        // class '"
        // + entry.getName() + "' from JAR '" + jar.getAbsolutePath() +
        // "'");
        // Logging.logMessage(Logging.LEVEL_WARN, this, exc);
        // } catch (LinkageError err) {
        // // ignore
        // }
        // }
        // }
        // }
    }
    // Phase 2: register all built-in policies (always available)
    for (Class polClass : builtInPolicies)
        checkClass(polClass);
}
/**
 * Loads the class with the given name. Lookup order: the internal policy
 * cache, the system class loader, a {@code .class} file in the plug-in
 * policy directory, and finally the plug-in JAR files.
 *
 * @param name
 *            the fully-qualified class name
 * @param resolve
 *            whether to resolve the class after defining it
 * @return the loaded class
 * @throws ClassNotFoundException
 *             if the class is neither built-in nor available as a plug-in
 */
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {

    // first, check whether the class is cached
    if (cache.containsKey(name))
        return cache.get(name);

    // if not cached, try to resolve the class by means of the system
    // class loader
    try {
        return findSystemClass(name);
    } catch (ClassNotFoundException exc) {
        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                "could not find system class '%s', trying to define the class", name);
    }

    if (policyDir == null || !policyDir.exists())
        throw new ClassNotFoundException("no built-in policy '" + name
            + "' available, and no plug-in policy directory specified");

    // if it could not be loaded w/ the system class loader, try to load it
    // from a file and define it
    try {
        File classFile = new File(policyDir.getAbsolutePath() + "/" + name.replace('.', '/') + ".class");
        Class clazz = loadFromStream(new FileInputStream(classFile));
        if (resolve)
            resolveClass(clazz);
        return clazz;
    } catch (IOException exc) {
        if (Logging.isDebug())
            Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                "could not define class '%s', trying to load the class from a plug-in JAR file", name);
    } catch (LinkageError err) {
        Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this, "could not define class '%s'", name);
        Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this, OutputUtils.stackTraceToString(err));
    }

    // if the class could not be loaded by the system class loader, try
    // to load it from an external JAR file; jarFiles may still be null if
    // init() has not been executed yet, so guard against an NPE here
    URL[] urls = new URL[jarFiles == null ? 0 : jarFiles.length];
    try {
        for (int i = 0; i < urls.length; i++)
            urls[i] = jarFiles[i].toURI().toURL();
    } catch (MalformedURLException exc) {
        Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, this, OutputUtils.stackTraceToString(exc));
    }

    // delegate failed resource lookups of the nested JAR loader back to
    // this class loader
    return new URLClassLoader(urls) {
        @Override
        public URL getResource(String name) {
            URL resource = super.getResource(name);
            if (resource != null)
                return resource;
            return PolicyClassLoader.this.getResource(name);
        }

        @Override
        public InputStream getResourceAsStream(String name) {
            InputStream stream = super.getResourceAsStream(name);
            if (stream != null)
                return stream;
            return PolicyClassLoader.this.getResourceAsStream(name);
        }
    }.loadClass(name);
}
/**
 * Returns a policy class for a given ID and interface.
 *
 * @param id
 *            the policy ID
 * @param policyInterface
 *            the policy interface
 * @return a class that has the given ID and is assignable from the given
 *         interface, or <code>null</code>, if no such class exists
 */
public Class<?> loadClass(long id, Class policyInterface) {
    final Map<Long, Class> idToClass = policyMap.get(policyInterface);
    return idToClass == null ? null : idToClass.get(id);
}
/**
 * Resolves a resource, first via the parent class loader, then by looking
 * for a plain file of the given name in the plug-in policy directory.
 *
 * @param name
 *            the resource name
 * @return a URL for the resource, or <code>null</code> if it cannot be found
 */
@Override
public URL getResource(String name) {

    // first, try to get the resource from the parent class loader
    URL resource = super.getResource(name);
    if (resource != null)
        return resource;

    // if no resource could be retrieved, look into the policy directory;
    // there may be no policy directory configured at all
    if (policyDir == null)
        return null;

    File file = new File(policyDir.getAbsolutePath() + "/" + name);
    if (file.exists())
        try {
            return file.toURI().toURL();
        } catch (MalformedURLException e) {
            return null;
        }

    return null;
}
/**
 * Opens a resource as a stream, first via the parent class loader, then by
 * opening a plain file of the given name in the plug-in policy directory.
 *
 * @param name
 *            the resource name
 * @return an open stream for the resource, or <code>null</code> if it
 *         cannot be found
 */
@Override
public InputStream getResourceAsStream(String name) {

    // first, try to get the stream from the parent class loader
    InputStream stream = super.getResourceAsStream(name);
    if (stream != null)
        return stream;

    // if no stream could be retrieved, look into the policy directory;
    // there may be no policy directory configured at all
    if (policyDir == null)
        return null;

    File file = new File(policyDir.getAbsolutePath() + "/" + name);
    try {
        return new FileInputStream(file);
    } catch (FileNotFoundException exc) {
        return null;
    }
}
/**
 * Reads the complete binary content from the given stream, defines a class
 * from it and registers the class in the internal cache. The stream is
 * always closed before returning.
 *
 * @param in
 *            the stream to read the class bytes from
 * @return the defined class
 * @throws IOException
 *             if reading the stream fails
 */
private Class loadFromStream(InputStream in) throws IOException {

    // read the entire stream content; InputStream.available() is only an
    // estimate and a single read() may return fewer bytes than requested,
    // so the content has to be copied chunk-wise until EOF
    byte[] classData = new byte[0];
    try {
        byte[] buf = new byte[4096];
        for (int len = in.read(buf); len != -1; len = in.read(buf)) {
            byte[] grown = new byte[classData.length + len];
            System.arraycopy(classData, 0, grown, 0, classData.length);
            System.arraycopy(buf, 0, grown, classData.length, len);
            classData = grown;
        }
    } finally {
        in.close();
    }

    Class clazz = defineClass(null, classData, 0, classData.length);
    cache.put(clazz.getName(), clazz);

    return clazz;
}
/**
 * Checks whether the given class implements one of the known policy
 * interfaces; if so, the class is registered in the policy map under the
 * value of its static <code>POLICY_ID</code> field.
 *
 * @param clazz
 *            the class to examine
 */
private void checkClass(Class clazz) {

    // compare the class against each supported policy interface
    for (Class candidateIfc : policyInterfaces) {

        if (!candidateIfc.isAssignableFrom(clazz))
            continue;

        try {
            // determine the policy ID from the static POLICY_ID field
            long policyId = clazz.getDeclaredField("POLICY_ID").getLong(null);

            // register the policy in the per-interface ID map, creating
            // the map on first use
            Map<Long, Class> polIdMap = policyMap.get(candidateIfc);
            if (polIdMap == null) {
                polIdMap = new HashMap<Long, Class>();
                policyMap.put(candidateIfc, polIdMap);
            }

            if (polIdMap.containsKey(policyId))
                Logging.logMessage(Logging.LEVEL_WARN, Category.misc, this,
                    "duplicate ID for policy '%s': %d", candidateIfc.getName(), policyId);

            polIdMap.put(policyId, clazz);

        } catch (Exception exc) {
            // e.g. POLICY_ID missing or inaccessible
            Logging.logMessage(Logging.LEVEL_WARN, this, "could not load malformed policy '%s'",
                clazz.getName());
            Logging.logMessage(Logging.LEVEL_WARN, this, OutputUtils.stackTraceToString(exc));
        }
    }
}
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2008-2011 by Jan Stender,
* Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.config;
import java.io.IOException;
import org.xtreemfs.foundation.SSLOptions.TrustManager;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
/**
 * Holds the policy class loader for a service and provides access to
 * configured policies such as the SSL trust manager.
 */
public class PolicyContainer {

    protected final ServiceConfig      config;

    protected final PolicyClassLoader policyClassLoader;

    /**
     * Creates a container whose class loader loads plug-in policies from the
     * policy directory named in the given configuration.
     *
     * @throws IOException
     *             if initializing the policy class loader fails
     */
    public PolicyContainer(ServiceConfig config) throws IOException {
        this(config, new PolicyClassLoader(config.getPolicyDir(), new String[0], new String[0]));
    }

    /**
     * Creates a container backed by the given (not yet initialized) policy
     * class loader.
     *
     * @throws IOException
     *             if initializing the policy class loader fails
     */
    protected PolicyContainer(ServiceConfig config, PolicyClassLoader policyClassLoader) throws IOException {
        this.config = config;
        this.policyClassLoader = policyClassLoader;
        policyClassLoader.init();
    }

    /**
     * Instantiates the trust manager class named in the configuration,
     * looking first for a built-in class and then for a plug-in policy.
     *
     * @return a new trust manager instance, or <code>null</code> if none is
     *         configured
     */
    public TrustManager getTrustManager() throws ClassNotFoundException, InstantiationException,
        IllegalAccessException {

        final String trustManagerName = config.getTrustManager();
        if (trustManagerName == null || trustManagerName.length() == 0)
            return null;

        // check for a built-in policy with the given name first
        try {
            return (TrustManager) Class.forName(trustManagerName).newInstance();
        } catch (Exception exc) {
            if (Logging.isDebug())
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                    "no built-in policy '%s' exists, searching for plug-in policies...", config
                            .getTrustManager());
        }

        // no built-in policy found: resolve the class via the plug-in
        // policy class loader and instantiate it
        Class cls = policyClassLoader.loadClass(trustManagerName);
        return (TrustManager) cls.newInstance();
    }
}

View File

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2010-2011 by Paul Seiferth, Zuse Institute Berlin
* 2012 by Michael Berlin, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.config;
import java.io.FileInputStream;
import java.util.HashMap;
import org.xtreemfs.dir.DIRClient;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.TimeSync;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.pbrpc.client.RPCNIOSocketClient;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.AuthType;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Configuration;
import org.xtreemfs.pbrpc.generatedinterfaces.DIRServiceClient;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.KeyValuePair;
/**
 * Helper for fetching a service configuration that is stored at the DIR.
 */
public class RemoteConfigHelper {

    /**
     * Retrieves the configuration for the service identified by the UUID in
     * {@code config} from the DIR and converts it into a
     * {@link ServiceConfig}. A temporary RPC client (and, if none is running
     * yet, a local time sync) is started for the request and shut down
     * afterwards.
     *
     * @param config
     *            a partial configuration containing at least the parameters
     *            required to connect to the DIR
     * @return the configuration retrieved from the DIR
     * @throws Exception
     *             if the DIR could not be contacted or the configuration
     *             could not be retrieved
     */
    public static ServiceConfig getConfigurationFromDIR(ServiceConfig config) throws Exception {

        TimeSync ts = null;
        boolean timeSyncAlreadyRunning = true;

        try {
            final int WAIT_BETWEEN_RETRIES = 1000;
            int retries = config.getWaitForDIR() * 1000 / WAIT_BETWEEN_RETRIES;
            if (retries <= 0) {
                retries = 1;
            }
            Logging.logMessage(Logging.LEVEL_INFO, null, "Loading configuration from DIR (will retry up to %d times)", retries);

            // set up SSL options only if the service is configured for SSL
            SSLOptions sslOptions;
            sslOptions = config.isUsingSSL() ? new SSLOptions(new FileInputStream(
                config.getServiceCredsFile()), config.getServiceCredsPassphrase(),
                config.getServiceCredsContainer(), new FileInputStream(config.getTrustedCertsFile()),
                config.getTrustedCertsPassphrase(), config.getTrustedCertsContainer(), false,
                config.isGRIDSSLmode(), config.getSSLProtocolString(),
                new PolicyContainer(config).getTrustManager()) : null;

            RPCNIOSocketClient clientStage = new RPCNIOSocketClient(sslOptions, 1000, 60 * 1000, "RemoteConfigHelper");
            DIRServiceClient dirRPCClient = new DIRServiceClient(clientStage, config.getDirectoryService());
            DIRClient dirClient = new DIRClient(dirRPCClient, config.getDirectoryServices(), retries,
                WAIT_BETWEEN_RETRIES);

            clientStage.start();
            clientStage.waitForStartup();

            // the RPC layer requires a running time sync; start a local one
            // only if none is initialized yet
            timeSyncAlreadyRunning = TimeSync.isInitialized();
            if (!timeSyncAlreadyRunning) {
                ts = TimeSync.initializeLocal(0);
                ts.waitForStartup();
            }

            Auth authNone = Auth.newBuilder().setAuthType(AuthType.AUTH_NONE).build();
            UserCredentials uc = UserCredentials.newBuilder().setUsername("main-method")
                .addGroups("xtreemfs-services").build();

            Configuration conf = dirClient.xtreemfs_configuration_get(null, authNone, uc, config.getUUID()
                .toString());

            clientStage.shutdown();
            clientStage.waitForShutdown();

            // convert the key/value pairs returned by the DIR into a
            // ServiceConfig
            HashMap<String, String> returnMap = new HashMap<String, String>();
            for (KeyValuePair kvp : conf.getParameterList()) {
                returnMap.put(kvp.getKey(), kvp.getValue());
            }

            return new ServiceConfig(returnMap);

        } finally {
            // stop the time sync again if (and only if) it was started by
            // this method
            if (!timeSyncAlreadyRunning && ts != null) {
                ts.close();
            }
        }
    }
}

View File

@@ -0,0 +1,786 @@
/*
* Copyright (c) 2008-2011 by Christian Lorenz, Bjoern Kolbeck,
* Jan Stender, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.config;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.StringTokenizer;
import org.xtreemfs.common.uuids.ServiceUUID;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.Schemes;
public class ServiceConfig extends Config {
// default debug categories used when none are configured
final private static Category[] debugCategoryDefault = { Category.all };

/**
 * Enumeration of all configuration parameters known to XtreemFS services.
 * Each constant carries its property-file key, a default value, the Java
 * type the value is deserialized to, and whether the parameter is required.
 */
public static enum Parameter {
    /*
     * general configuration parameter
     */
    DEBUG_LEVEL("debug.level", 6, Integer.class, false),
    DEBUG_CATEGORIES("debug.categories", debugCategoryDefault, Category[].class, false),
    DIRECTORY_SERVICE("dir_service.host", null, InetSocketAddress.class, true),
    DIRECTORY_SERVICE0("dir_service.0.host", null, InetSocketAddress.class, false),
    DIRECTORY_SERVICE1("dir_service.1.host", null, InetSocketAddress.class, false),
    DIRECTORY_SERVICE2("dir_service.2.host", null, InetSocketAddress.class, false),
    DIRECTORY_SERVICE3("dir_service.3.host", null, InetSocketAddress.class, false),
    DIRECTORY_SERVICE4("dir_service.4.host", null, InetSocketAddress.class, false),
    PORT("listen.port", null, Integer.class, true),
    HTTP_PORT("http_port", null, Integer.class, true),
    LISTEN_ADDRESS("listen.address", null, InetAddress.class, false),
    USE_SSL("ssl.enabled", false, Boolean.class, false),
    SSL_PROTOCOL_STRING("ssl.protocol", null, String.class, false),
    SERVICE_CREDS_FILE("ssl.service_creds", null, String.class, false ),
    SERVICE_CREDS_PASSPHRASE("ssl.service_creds.pw", null, String.class, false),
    SERVICE_CREDS_CONTAINER("ssl.service_creds.container", null, String.class, false),
    TRUSTED_CERTS_FILE("ssl.trusted_certs", null, String.class, false),
    TRUSTED_CERTS_CONTAINER("ssl.trusted_certs.container", null, String.class, false),
    TRUSTED_CERTS_PASSPHRASE("ssl.trusted_certs.pw", null, String.class, false),
    TRUST_MANAGER("ssl.trust_manager", "", String.class, false),
    GEO_COORDINATES("geographic_coordinates", "", String.class, false ),
    ADMIN_PASSWORD("admin_password", "", String.class, false),
    HOSTNAME("hostname", "", String.class, false ),
    USE_GRID_SSL_MODE("ssl.grid_ssl", false, Boolean.class, false),
    WAIT_FOR_DIR("startup.wait_for_dir", 30, Integer.class, false),
    POLICY_DIR("policy_dir", "/etc/xos/xtreemfs/policies/", String.class, false),
    USE_SNMP("snmp.enabled", false, Boolean.class, false),
    SNMP_ADDRESS("snmp.address", null, InetAddress.class, false),
    SNMP_PORT("snmp.port", null, Integer.class, false),
    SNMP_ACL("snmp.aclfile", null, String.class, false),
    FAILOVER_MAX_RETRIES("failover.retries", 15, Integer.class, false),
    FAILOVER_WAIT("failover.wait_ms", 15 * 1000, Integer.class, false),
    MAX_CLIENT_Q("max_client_queue", 100, Integer.class, false),
    MAX_REQUEST_QUEUE_LENGTH("max_requests_queue_length", 1000, Integer.class, false),
    USE_MULTIHOMING("multihoming.enabled", false, Boolean.class, false),
    USE_RENEWAL_SIGNAL("multihoming.renewal_signal", false, Boolean.class, false ),
    /*
     * DIR specific configuration parameter
     */
    AUTODISCOVER_ENABLED("discover", true, Boolean.class, false),
    MONITORING_ENABLED("monitoring.enabled", false, Boolean.class, false ),
    ADMIN_EMAIL("monitoring.email.receiver", "", String.class, false),
    SENDER_ADDRESS("monitoring.email.sender", "XtreemFS DIR monitoring <dir@localhost>", String.class, false),
    MAX_WARNINGS("monitoring.max_warnings", 1, Integer.class, false),
    SENDMAIL_BIN("monitoring.email.programm", "/usr/sbin/sendmail", String.class, false),
    TIMEOUT_SECONDS("monitoring.service_timeout_s", 5 * 60, Integer.class, false),
    VIVALDI_MAX_CLIENTS("vivaldi.max_clients", 32, Integer.class, false),
    VIVALDI_CLIENT_TIMEOUT("vivaldi.client_timeout", 600000, Integer.class, false), // default: twice the recalculation interval
    /*
     * MRC specific configuration parameter
     */
    UUID("uuid", null, ServiceUUID.class, true),
    LOCAL_CLOCK_RENEW("local_clock_renewal", null, Integer.class, true),
    REMOTE_TIME_SYNC("remote_time_sync", null, Integer.class, true),
    OSD_CHECK_INTERVAL("osd_check_interval", null, Integer.class, true),
    NOATIME("no_atime", null, Boolean.class, true),
    AUTHENTICATION_PROVIDER("authentication_provider", null, String.class, true),
    CAPABILITY_SECRET("capability_secret", null, String.class, true),
    CAPABILITY_TIMEOUT("capability_timeout", 600, Integer.class, false),
    RENEW_TIMED_OUT_CAPS("renew_to_caps", false, Boolean.class, false),
    /*
     * OSD specific configuration parameter
     */
    OBJECT_DIR("object_dir", null, String.class, true),
    REPORT_FREE_SPACE("report_free_space", null, Boolean.class, true),
    CHECKSUM_ENABLED("checksums.enabled", false, Boolean.class, false),
    CHECKSUM_PROVIDER("checksums.algorithm", null, String.class, false),
    STORAGE_LAYOUT("storage_layout", "HashStorageLayout", String.class, false),
    IGNORE_CAPABILITIES("ignore_capabilities", false, Boolean.class, false),
    /** Maximum assumed drift between two server clocks. If the drift is higher, the system may not function properly. */
    FLEASE_DMAX_MS("flease.dmax_ms", 1000, Integer.class, false),
    FLEASE_LEASE_TIMEOUT_MS("flease.lease_timeout_ms", 14000, Integer.class, false),
    /** Message timeout. Maximum allowed in-transit time for a Flease message. */
    FLEASE_MESSAGE_TO_MS("flease.message_to_ms", 500, Integer.class, false),
    FLEASE_RETRIES("flease.retries", 3, Integer.class, false),
    SOCKET_SEND_BUFFER_SIZE("socket.send_buffer_size", -1, Integer.class, false),
    SOCKET_RECEIVE_BUFFER_SIZE("socket.recv_buffer_size", -1, Integer.class, false),
    VIVALDI_RECALCULATION_INTERVAL_IN_MS("vivaldi.recalculation_interval_ms", 300000, Integer.class, false),
    VIVALDI_RECALCULATION_EPSILON_IN_MS("vivaldi.recalculation_epsilon_ms", 30000, Integer.class, false),
    VIVALDI_ITERATIONS_BEFORE_UPDATING("vivaldi.iterations_before_updating", 12, Integer.class, false),
    VIVALDI_MAX_RETRIES_FOR_A_REQUEST("vivaldi.max_retries_for_a_request", 2, Integer.class, false),
    VIVALDI_MAX_REQUEST_TIMEOUT_IN_MS("vivaldi.max_request_timeout_ms", 10000, Integer.class, false),
    VIVALDI_TIMER_INTERVAL_IN_MS("vivaldi.timer_interval_ms", 60000, Integer.class, false),
    STORAGE_THREADS("storage_threads", 1, Integer.class, false),
    HEALTH_CHECK("health_check", "", String.class, false),
    /*
     * Benchmark specific configuration parameter
     */
    BASEFILE_SIZE_IN_BYTES("basefilesize_in_bytes", 3221225472L, Long.class, false), // 3221225472L = 3 GiB
    FILESIZE("filesize", 4096, Integer.class, false), // 4096 = 4 KiB
    USERNAME("username", "benchmark", String.class, false),
    GROUP("group", "benchmark", String.class, false),
    OSD_SELECTION_POLICIES("osd_selection_policies", "", String.class, false),
    REPLICATION_POLICY("replication_policy", "", String.class, false),
    REPLICATION_FACTOR("replication_factor", 3, Integer.class, false),
    CHUNK_SIZE_IN_BYTES("chunk_size_in_bytes", 131072, Integer.class, false), // 131072 = 128 KiB
    STRIPE_SIZE_IN_BYTES("stripe_size_in_bytes", 131072, Integer.class, false), // 131072 = 128 KiB
    STRIPE_SIZE_SET("stripe_size_set", false, Boolean.class, false),
    STRIPE_WIDTH("stripe_width", 1, Integer.class, false),
    STRIPE_WIDTH_SET("stripe_width_set", false, Boolean.class, false),
    NO_CLEANUP("no_cleanup", false, Boolean.class, false),
    NO_CLEANUP_VOLUMES("no_cleanup_volumes", false, Boolean.class, false),
    NO_CLEANUP_BASEFILE("no_cleanup_basefile", false, Boolean.class, false),
    OSD_CLEANUP("osd_cleanup", false, Boolean.class, false);

    Parameter(String propString, Object defaultValue, Class propClass, Boolean req) {
        propertyString = propString;
        this.defaultValue = defaultValue;
        propertyClass = propClass;
        required = req;
    }

    /**
     * number of values the enumeration contains
     */
    private static final int size = Parameter.values().length;

    /**
     * String representation of the parameter in .property file
     */
    private final String propertyString;

    /**
     * Class of the parameter. Used for deserilization. Note: If you add a new Class type, don't forget to
     * update the ServiceConfig(HashMap <String,String>) constructor
     */
    private final Class propertyClass;

    /**
     * Default parameter which will be used if there is neither a Parameter in the properties file nor in
     * the DIR
     */
    private final Object defaultValue;

    /**
     * True if this is a required parameter. False otherwise.
     */
    private final Boolean required;

    /** Returns the parameter's key in the .properties file. */
    public String getPropertyString() {
        return propertyString;
    }

    /** Returns the value used when the parameter is not set anywhere. */
    public Object getDefaultValue() {
        return defaultValue;
    }

    /** Returns the Java type the parameter's value is deserialized to. */
    public Class getPropertyClass() {
        return propertyClass;
    }

    /** Returns the number of parameters in this enumeration. */
    public static int getSize() {
        return size;
    }

    public Boolean isRequired() {
        return required;
    }

    public Boolean isOptional() {
        return !required;
    }

    /**
     * Looks up the parameter whose property key equals the given string.
     *
     * @throws RuntimeException
     *             if no parameter with that property key exists
     */
    public static Parameter getParameterFromString(String s) throws RuntimeException {
        for (Parameter parm : Parameter.values()) {
            if (s.equals(parm.getPropertyString()))
                return parm;
        }
        throw new RuntimeException("Configuration parameter " + s + " doesn't exist!");
    }
}
/**
 * Parameters which are required to initialize a connection to the DIR and
 * request the remaining configuration from it.
 */
private final Parameter[] connectionParameter = {
    Parameter.DEBUG_CATEGORIES,
    Parameter.DEBUG_LEVEL,
    Parameter.HOSTNAME,
    Parameter.DIRECTORY_SERVICE,
    Parameter.WAIT_FOR_DIR,
    Parameter.PORT,
    Parameter.USE_SSL,
    Parameter.UUID
};

/**
 * Checks if all configuration parameters required to initialize a
 * connection to the DIR (and request the rest of the configuration) are
 * set, and validates the SSL configuration.
 *
 * @return {@link Boolean} <code>true</code> if all connection parameters
 *         are present
 * @throws RuntimeException
 *             if a required connection parameter is missing or the SSL
 *             configuration is invalid
 */
public Boolean isInitializable() {
    for (Parameter param : connectionParameter) {
        if (parameter.get(param) == null) {
            throw new RuntimeException("property '" + param.getPropertyString()
                + "' is required but was not found");
        }
    }
    checkSSLConfiguration();
    return true;
}

/** Returns the set of parameters required for connecting to the DIR. */
public Parameter[] getConnectionParameter() {
    return this.connectionParameter;
}
/**
 * Reads only the given parameters from the config file and applies default
 * values for those that remain unset.
 *
 * @param params
 *            the parameters to read
 * @throws IOException
 */
public void readParameters(Parameter[] params) throws IOException {
    for (Parameter param : params) {
        parameter.put(param, readParameter(param));
    }
    setDefaults(params);
}

// deserialized parameter values, keyed by parameter
protected EnumMap<Parameter, Object> parameter = new EnumMap<Parameter, Object>(
    Parameter.class);

// prefix of custom OSD configuration properties, which are skipped when a
// configuration is built from a key/value map
public static final String OSD_CUSTOM_PROPERTY_PREFIX = "config.";

/** Creates an empty configuration. */
public ServiceConfig() {
    super();
}

/** Creates a configuration from the given properties. */
public ServiceConfig(Properties prop) {
    super(prop);
}

/** Creates a configuration read from the given properties file. */
public ServiceConfig(String filename) throws IOException {
    super(filename);
}
/**
 * Creates a configuration from the string key/value pairs of a HashMap,
 * deserializing each value according to the declared type of the
 * corresponding {@link Parameter}. Entries with unknown keys and custom
 * OSD properties (prefix {@link #OSD_CUSTOM_PROPERTY_PREFIX}) are skipped.
 *
 * @param hm
 *            property keys mapped to their string values
 */
public ServiceConfig(HashMap<String, String> hm) {
    super();

    /*
     * Create a configuration from String Key-Values of a HashMap
     */
    for (Entry<String, String> entry : hm.entrySet()) {

        // ignore custom configuration properties for OSDs here
        if (entry.getKey().startsWith(OSD_CUSTOM_PROPERTY_PREFIX)) {
            continue;
        }

        Parameter param = null;
        try {
            param = Parameter.getParameterFromString(entry.getKey());
        } catch (RuntimeException e) {
            e.printStackTrace();
        }
        // skip unknown parameters instead of failing with an NPE below
        if (param == null) {
            continue;
        }

        /* Integer values */
        if (Integer.class == param.getPropertyClass()) {
            parameter.put(param, Integer.parseInt(entry.getValue()));
        }
        /* Long values */
        if (Long.class == param.getPropertyClass()) {
            parameter.put(param, Long.parseLong(entry.getValue()));
        }
        /* String values */
        if (String.class == param.getPropertyClass()) {
            parameter.put(param, entry.getValue());
        }
        /* Boolean values */
        if (Boolean.class == param.getPropertyClass()) {
            parameter.put(param, Boolean.valueOf(entry.getValue()));
        }
        /* ServiceUUID values */
        if (ServiceUUID.class == param.getPropertyClass()) {
            parameter.put(param, new ServiceUUID(entry.getValue()));
        }
        /* InetAddress values */
        if (InetAddress.class == param.getPropertyClass()) {
            InetAddress inetAddr = null;
            try {
                // strip a leading "hostname/" prefix, if present
                inetAddr = InetAddress.getByName(entry.getValue().substring(
                    entry.getValue().indexOf('/') + 1));
            } catch (UnknownHostException e) {
                // keep a null entry for unresolvable addresses
                e.printStackTrace();
            }
            parameter.put(param, inetAddr);
        }
        /* InetSocketAddress values */
        if (InetSocketAddress.class == param.getPropertyClass()) {
            /*
             * Get a host and port of a string like 'hostname/192.168.2.141:36365' and create a
             * InetSocketAddress
             */
            String host = entry.getValue().substring(0, entry.getValue().indexOf("/"));
            String port = entry.getValue().substring(entry.getValue().lastIndexOf(":") + 1);
            InetSocketAddress isa = new InetSocketAddress(host, Integer.parseInt(port));
            parameter.put(param, isa);
        }
        /* Category[] values */
        if (Category[].class == param.getPropertyClass()) {
            StringTokenizer stk = new StringTokenizer(entry.getValue(), ", ");
            Category[] catArray = new Category[stk.countTokens()];
            int count = 0;
            while (stk.hasMoreElements()) {
                catArray[count] = Category.valueOf(stk.nextToken());
                count++;
            }
            parameter.put(param, catArray);
        }
    }
}
/**
 * Merges a second configuration into this one. Only required parameters
 * that are still unset here are taken over from the given configuration.
 *
 * @param conf
 *            the configuration to take missing required values from
 */
public void mergeConfig(ServiceConfig conf) {
    for (Entry<Parameter, Object> other : conf.parameter.entrySet()) {
        Parameter key = other.getKey();
        if (key.isRequired() && parameter.get(key) == null)
            parameter.put(key, other.getValue());
    }
}
/**
 * Sets the default value for a specific parameter, unless a value has
 * already been assigned.
 *
 * @param param
 *            - {@link Parameter}
 */
public void setDefaults(Parameter param) {
    if (parameter.get(param) != null)
        return;
    parameter.put(param, param.getDefaultValue());
}
/**
 * Sets the default values for all optional parameters in {@code p} that do
 * not have a value yet.
 *
 * @param p
 *            the parameters to apply defaults to
 */
public void setDefaults(Parameter[] p) {
    for (Parameter candidate : p) {
        boolean unset = parameter.get(candidate) == null;
        if (candidate.isOptional() && unset)
            parameter.put(candidate, candidate.getDefaultValue());
    }
}
/**
 * Reads the "debug.level" property and maps it to a numeric logging level.
 * Accepts either a symbolic name (EMERG..DEBUG, case-insensitive) or a raw
 * integer; defaults to {@link Logging#LEVEL_INFO} when the property is
 * absent.
 *
 * @return the numeric debug level
 * @throws RuntimeException
 *             if the value is neither a known level name nor an integer
 */
protected int readDebugLevel() {
    String level = props.getProperty("debug.level");
    if (level == null)
        return Logging.LEVEL_INFO;

    final String normalized = level.trim().toUpperCase();

    if (normalized.equals("EMERG"))
        return Logging.LEVEL_EMERG;
    if (normalized.equals("ALERT"))
        return Logging.LEVEL_ALERT;
    if (normalized.equals("CRIT"))
        return Logging.LEVEL_CRIT;
    if (normalized.equals("ERR"))
        return Logging.LEVEL_ERROR;
    if (normalized.equals("WARNING"))
        return Logging.LEVEL_WARN;
    if (normalized.equals("NOTICE"))
        return Logging.LEVEL_NOTICE;
    if (normalized.equals("INFO"))
        return Logging.LEVEL_INFO;
    if (normalized.equals("DEBUG"))
        return Logging.LEVEL_DEBUG;

    // not a symbolic name: accept a raw integer level
    try {
        return Integer.valueOf(normalized);
    } catch (NumberFormatException ex) {
        throw new RuntimeException("'" + normalized + "' is not a valid level name nor an integer");
    }
}
/**
 * Read configuration parameter from property file and return an Object of the value if the parameter was
 * set. Else return null.
 *
 * @param param the parameter
 *
 * @return the deserialized value, or <code>null</code> if the property is
 *         not set (or its declared type is not handled here)
 */
protected Object readParameter(Parameter param) {

    String tmpString = props.getProperty(param.getPropertyString());
    if (tmpString == null) {
        return null;
    }

    // Integer values
    if (Integer.class == param.getPropertyClass()) {
        return Integer.parseInt(tmpString.trim());
    }
    /* Long values */
    if (Long.class == param.getPropertyClass()) {
        return(Long.parseLong(tmpString.trim()));
    }
    // Boolean values
    if (Boolean.class == param.getPropertyClass()) {
        return Boolean.parseBoolean(tmpString.trim());
    }
    // String values
    if (String.class == param.getPropertyClass()) {
        return tmpString.trim();
    }
    // ServiceUUID values
    if (ServiceUUID.class == param.getPropertyClass()) {
        return new ServiceUUID(tmpString);
    }
    // InetAddress values
    if (InetAddress.class == param.getPropertyClass()) {
        InetAddress iAddr = null;
        try {
            iAddr = InetAddress.getByName(tmpString);
        } catch (Exception e) {
            // unresolvable address: fall through and return null below
            e.printStackTrace();
        }
        return iAddr;
    }
    // InetSocketAddress values
    if (InetSocketAddress.class == param.getPropertyClass()) {
        // assumes that the parameter in the property file like
        // "foobar.host" and "foobar.port" if you
        // want to read a InetSocketAddress
        return readRequiredInetAddr(param.getPropertyString(),
            param.getPropertyString().replaceAll("host", "port"));
    }
    // Category[] values
    if (Category[].class == param.getPropertyClass()) {
        return readCategories(param.getPropertyString());
    }

    return null;
}
/**
 * Parses a comma- or whitespace-separated list of logging categories from
 * the given property. Unknown category names are reported on stderr and
 * skipped; an empty list falls back to {@link Category#all}.
 *
 * @param property
 *            the property key to read the category list from
 * @return the parsed categories
 */
protected Category[] readCategories(String property) {

    String raw = this.readOptionalString(property, "");

    List<Category> parsed = new LinkedList<Category>();
    StringTokenizer tokenizer = new StringTokenizer(raw, " \t,");
    while (tokenizer.hasMoreTokens()) {
        String name = tokenizer.nextToken();
        try {
            parsed.add(Category.valueOf(name));
        } catch (IllegalArgumentException exc) {
            System.err.println("invalid logging category: " + name);
        }
    }

    if (parsed.isEmpty())
        parsed.add(Category.all);

    return parsed.toArray(new Category[parsed.size()]);
}
/**
 * Serializes all parameters that have a value into a map from property key
 * to string representation; {@link Category} arrays are joined with
 * <code>", "</code>. The result is the inverse of the
 * {@code ServiceConfig(HashMap)} constructor's input format.
 *
 * @return the configuration as a key/value map
 */
public HashMap<String, String> toHashMap() {
    HashMap<String, String> hm = new HashMap<String, String>();

    for (Parameter param : Parameter.values()) {
        Object value = parameter.get(param);
        if (value == null)
            continue;

        if (Category[].class == param.getPropertyClass()) {
            // join the categories with ", "; use a StringBuilder instead
            // of repeated string concatenation in the loop
            StringBuilder joined = new StringBuilder();
            for (Category cat : (Category[]) value) {
                if (joined.length() > 0)
                    joined.append(", ");
                joined.append(cat.toString());
            }
            hm.put(param.getPropertyString(), joined.toString());
        } else {
            hm.put(param.getPropertyString(), value.toString());
        }
    }

    return hm;
}
/*
 * Typed accessors for individual configuration parameters. Each getter
 * casts the stored value to its declared type; getters returning a
 * primitive throw a NullPointerException when unboxing an unset value.
 */

public int getDebugLevel() {
    return (Integer) parameter.get(Parameter.DEBUG_LEVEL);
}

public Category[] getDebugCategories() {
    return (Category[]) parameter.get(Parameter.DEBUG_CATEGORIES);
}

public int getPort() {
    return (Integer) parameter.get(Parameter.PORT);
}

public int getHttpPort() {
    return (Integer) parameter.get(Parameter.HTTP_PORT);
}

public InetAddress getAddress() {
    return (InetAddress) parameter.get(Parameter.LISTEN_ADDRESS);
}

public boolean isUsingSSL() {
    return (Boolean) parameter.get(Parameter.USE_SSL);
}

public String getServiceCredsContainer() {
    return (String) parameter.get(Parameter.SERVICE_CREDS_CONTAINER);
}

public String getServiceCredsFile() {
    return (String) parameter.get(Parameter.SERVICE_CREDS_FILE);
}

public String getServiceCredsPassphrase() {
    return (String) parameter.get(Parameter.SERVICE_CREDS_PASSPHRASE);
}

public String getTrustedCertsContainer() {
    return (String) parameter.get(Parameter.TRUSTED_CERTS_CONTAINER);
}

public String getTrustedCertsFile() {
    return (String) parameter.get(Parameter.TRUSTED_CERTS_FILE);
}

public String getTrustedCertsPassphrase() {
    return (String) parameter.get(Parameter.TRUSTED_CERTS_PASSPHRASE);
}

public String getTrustManager() {
    return (String) parameter.get(Parameter.TRUST_MANAGER);
}

public String getGeoCoordinates() {
    return (String) parameter.get(Parameter.GEO_COORDINATES);
}

public void setGeoCoordinates(String geoCoordinates) {
    parameter.put(Parameter.GEO_COORDINATES, geoCoordinates);
}

public String getAdminPassword() {
    return (String) parameter.get(Parameter.ADMIN_PASSWORD);
}

public String getHostName() {
    return (String) parameter.get(Parameter.HOSTNAME);
}

public ServiceUUID getUUID() {
    return (ServiceUUID) parameter.get(Parameter.UUID);
}

/**
 * @return whether grid SSL mode (SSL handshake only) is enabled; safe to
 *         call even if the parameter is unset
 */
public boolean isGRIDSSLmode() {
    return parameter.get(Parameter.USE_GRID_SSL_MODE) != null
        && (Boolean) parameter.get(Parameter.USE_GRID_SSL_MODE);
}

public String getSSLProtocolString() {
    return (String) parameter.get(Parameter.SSL_PROTOCOL_STRING);
}

public int getWaitForDIR() {
    return (Integer) parameter.get(Parameter.WAIT_FOR_DIR);
}

// returns the pbrpc URL scheme matching the SSL configuration
public String getURLScheme() {
    if (isUsingSSL()) {
        if (isGRIDSSLmode()) {
            return Schemes.SCHEME_PBRPCG;
        } else {
            return Schemes.SCHEME_PBRPCS;
        }
    }
    return Schemes.SCHEME_PBRPC;
}

public String getPolicyDir() {
    return (String) parameter.get(Parameter.POLICY_DIR);
}

public Boolean isUsingSnmp() {
    return (Boolean) parameter.get(Parameter.USE_SNMP);
}

public InetAddress getSnmpAddress() {
    return (InetAddress) parameter.get(Parameter.SNMP_ADDRESS);
}

public Integer getSnmpPort() {
    return (Integer) parameter.get(Parameter.SNMP_PORT);
}

public String getSnmpACLFile() {
    return (String) parameter.get(Parameter.SNMP_ACL);
}

public Integer getFailoverMaxRetries() {
    return (Integer) parameter.get(Parameter.FAILOVER_MAX_RETRIES);
}

public Integer getFailoverWait() {
    return (Integer) parameter.get(Parameter.FAILOVER_WAIT);
}

public InetSocketAddress getDirectoryService() {
    return (InetSocketAddress) parameter.get(Parameter.DIRECTORY_SERVICE);
}
/**
 * Returns all configured DIR addresses: the primary
 * {@link Parameter#DIRECTORY_SERVICE} followed by any of the optional replicas
 * DIRECTORY_SERVICE0..DIRECTORY_SERVICE4 that are set.
 *
 * Replaces the copy-pasted null-check ladder with a loop; additionally skips a
 * null primary entry instead of inserting {@code null} into the result.
 *
 * @return array of DIR addresses in configuration order
 */
public InetSocketAddress[] getDirectoryServices() {
    Parameter[] dirKeys = { Parameter.DIRECTORY_SERVICE, Parameter.DIRECTORY_SERVICE0,
            Parameter.DIRECTORY_SERVICE1, Parameter.DIRECTORY_SERVICE2,
            Parameter.DIRECTORY_SERVICE3, Parameter.DIRECTORY_SERVICE4 };
    List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
    for (Parameter key : dirKeys) {
        Object address = parameter.get(key);
        if (address != null) {
            addresses.add((InetSocketAddress) address);
        }
    }
    return addresses.toArray(new InetSocketAddress[0]);
}
/**
 * Sets the address of the primary directory service.
 *
 * @param addr DIR address to store under {@link Parameter#DIRECTORY_SERVICE}
 */
public void setDirectoryService(InetSocketAddress addr) {
parameter.put(Parameter.DIRECTORY_SERVICE, addr);
}
/**
 * Checks if the SSL configuration is valid. If not, throws a {@link RuntimeException}.
 *
 * With SSL enabled, every credential and truststore option must be present.
 * With SSL disabled, the grid-SSL mode must not be enabled either, since
 * grid-SSL is a variant of SSL.
 *
 * @throws RuntimeException if the SSL configuration is inconsistent
 */
public void checkSSLConfiguration() {
    Parameter[] sslRelatedParameter = { Parameter.SERVICE_CREDS_CONTAINER, Parameter.SERVICE_CREDS_FILE,
            Parameter.SERVICE_CREDS_PASSPHRASE, Parameter.TRUSTED_CERTS_CONTAINER,
            Parameter.TRUSTED_CERTS_FILE, Parameter.TRUSTED_CERTS_PASSPHRASE };
    if (isUsingSSL()) {
        for (Parameter param : sslRelatedParameter) {
            if (parameter.get(param) == null) {
                throw new RuntimeException("for SSL " + param.getPropertyString() + " must be set!");
            }
        }
    } else if (isGRIDSSLmode()) {
        // isGRIDSSLmode() treats an unset parameter as disabled, so the
        // original's extra null check was redundant.
        throw new RuntimeException(
                "ssl must be enabled to use the grid_ssl mode. Please make sure to set ssl.enabled = true and to configure all SSL options.");
    }
}
/**
 * Checks if the multihoming configuration is valid. If not, throws a
 * {@link RuntimeException}: multihoming and an explicit listen address are
 * mutually exclusive.
 *
 * @throws RuntimeException if both multihoming and a listen address are set
 */
protected void checkMultihomingConfiguration() {
    boolean conflicting = isUsingMultihoming() && getAddress() != null;
    if (!conflicting) {
        return;
    }
    throw new RuntimeException(ServiceConfig.Parameter.USE_MULTIHOMING.getPropertyString() + " and "
            + ServiceConfig.Parameter.LISTEN_ADDRESS.getPropertyString() + " parameters are incompatible.");
}
/**
 * Verifies that every required property among {@code params} is present and
 * that the SSL configuration is consistent.
 *
 * @param params parameters to validate against the loaded configuration
 * @throws RuntimeException if a required property is missing or SSL is misconfigured
 */
protected void checkConfig(Parameter[] params) {
    for (Parameter param : params) {
        if (!param.isRequired()) {
            continue;
        }
        if (parameter.get(param) == null) {
            throw new RuntimeException("property '" + param.getPropertyString()
                    + "' is required but was not found");
        }
    }
    this.checkSSLConfiguration();
}
/**
 * Returns whether the certificate-renewal signal is enabled.
 * NOTE(review): unboxing throws a NullPointerException if the parameter is unset —
 * presumably defaulted elsewhere; confirm against the parameter defaults.
 */
public boolean isUsingRenewalSignal() {
return (Boolean) parameter.get(Parameter.USE_RENEWAL_SIGNAL);
}
/**
 * Returns whether multihoming support is enabled.
 * NOTE(review): unboxing throws a NullPointerException if the parameter is unset —
 * presumably defaulted elsewhere; confirm against the parameter defaults.
 */
public boolean isUsingMultihoming() {
return (Boolean) parameter.get(Parameter.USE_MULTIHOMING);
}
}

View File

@@ -0,0 +1,161 @@
/*
* Copyright (c) 2012 by Lukas Kairies, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.VolumeNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.XtreemFSException;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Service;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceSet;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceStatus;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.ServiceType;
/**
 * A {@link Client} with additional administrative operations: OSD cleanup
 * control, DIR service lookup, and OSD status management.
 */
public interface AdminClient extends Client {
/**
 * Open an admin volume and use the returned class to access it.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws VolumeNotFoundException
 * @throws IOException
 */
public AdminVolume openVolume(String volumeName, SSLOptions sslOptions, Options options)
throws AddressToUUIDNotFoundException, VolumeNotFoundException, IOException;
/**
 * Starts a cleanup on the OSD with the given UUID.
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @param remove
 *            erase potential zombies
 * @param deleteVolumes
 *            deletes volumes that might be dead
 * @param restore
 *            restore zombies found on the OSD
 * @param removeMetadata
 *            remove metadata from deleted or abandoned files
 * @param metaDataTimeoutS
 *            time in seconds to wait after the last view update before deleting metadata
 * @throws IOException
 */
public void startCleanUp(String osdUUID, String password, boolean remove, boolean deleteVolumes, boolean restore,
boolean removeMetadata, int metaDataTimeoutS) throws IOException;
/**
 * Run a version cleanup (only if file content versioning is enabled).
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @throws IOException
 */
public void startVersionCleanUp(String osdUUID, String password) throws IOException;
/**
 * Suspends the currently running cleanup process.
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @throws IOException
 */
public void stopCleanUp(String osdUUID, String password) throws IOException;
/**
 * Returns true if a cleanup is running.
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @throws IOException
 */
public boolean isRunningCleanUp(String osdUUID, String password) throws IOException;
/**
 * Returns the current cleanup state.
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @throws IOException
 *
 */
public String getCleanUpState(String osdUUID, String password) throws IOException;
/**
 * Returns the cleanup result.
 *
 * @param osdUUID
 *            UUID of the OSD
 * @param password
 *            admin password
 * @throws IOException
 *
 */
public List<String> getCleanUpResult(String osdUUID, String password) throws IOException;
/**
 * Returns a ServiceSet with all services of the given type.
 *
 * @param serviceType
 *            type of the services to look up
 * @return ServiceSet
 * @throws IOException
 */
public ServiceSet getServiceByType(ServiceType serviceType) throws IOException;
/**
 * Returns the Service with the given UUID.
 *
 * @param uuid
 *            UUID of the Service
 * @throws IOException
 */
public Service getServiceByUUID(String uuid) throws IOException;
/**
 * Set the service status of the OSD with UUID "osdUUID" to "serviceStatus".
 *
 * @param osdUUID
 *            UUID of the OSD.
 * @param serviceStatus
 *            service status which will be set.
 * @throws IOException
 */
public void setOSDServiceStatus(String osdUUID, ServiceStatus serviceStatus) throws IOException;
/**
 * Returns the current status of the OSD with the UUID "osdUUID".
 *
 * @param osdUUID
 *            UUID of the OSD
 * @return the OSD's current service status
 * @throws IOException
 */
public ServiceStatus getOSDServiceStatus(String osdUUID) throws IOException;
/**
 * Returns a set of all removed OSDs.
 *
 * @throws IOException
 */
public Set<String> getRemovedOsds() throws IOException;
}

View File

@@ -0,0 +1,132 @@
/*
* Copyright (c) 2011 by Lukas Kairies, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.List;
import org.xtreemfs.common.clients.InvalidChecksumException;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
/**
 * A {@link FileHandle} with additional administrative operations: replica
 * inspection, checksum verification, object repair and MRC-only truncation.
 */
public interface AdminFileHandle extends FileHandle {
/**
 * Returns a list with all replicas of the file.
 */
public List<Replica> getReplicasList();
/**
 * Returns the replica with index "replicaIndex".
 *
 * @param replicaIndex
 *            Index of the replica.
 */
public Replica getReplica(int replicaIndex);
/**
 * Returns the striping policy of the file. If the file is replicated, the striping policy of the first
 * replica is returned.
 */
public StripingPolicy getStripingPolicy();
/**
 * Returns the striping policy of the replica with index "replicaIndex".
 *
 * @param replicaIndex
 *            Index of the replica.
 */
public StripingPolicy getStripingPolicy(int replicaIndex);
/**
 * Returns the replica update policy of the file as a String. Constants for replica update policies are
 * defined in {@link org.xtreemfs.common.ReplicaUpdatePolicies}.
 */
public String getReplicaUpdatePolicy();
/**
 * Returns the global ID of the file.
 */
public String getGlobalFileId();
/**
 * Checks if a read-only replica with index "replicaIndex" is a complete replica and marks it as complete
 * if not done yet.
 *
 * @param replicaIndex
 *            Index of the replica.
 * @param userCredentials
 *            Name and groups of the user.
 */
public boolean checkAndMarkIfReadOnlyReplicaComplete(int replicaIndex, UserCredentials userCredentials)
throws IOException, AddressToUUIDNotFoundException;
/**
 * Returns the number of objects.
 *
 * @param userCredentials
 *            Name and Groups of the user.
 *
 * @throws IOException
 */
public long getNumObjects(UserCredentials userCredentials) throws IOException;
/**
 * Checks the object's checksum and returns the total number of bytes (data + sparse data) or throws an
 * InvalidChecksumException.
 *
 * @param replicaIndex
 *            Replica from which the object will be checked.
 * @param objectNo
 *            Object which will be checked.
 *
 * @throws InvalidChecksumException
 * @throws IOException
 */
public int checkObjectAndGetSize(int replicaIndex, long objectNo) throws IOException,
InvalidChecksumException;
/**
 * Repairs the object with number "objectNo" of the replica with index "replicaIndex".
 *
 * @param replicaIndex
 *            Index of the replica in the xlocset from which the object will be repaired.
 * @param objectNo
 *            Object which will be repaired.
 */
public void repairObject(int replicaIndex, long objectNo) throws IOException;
/**
 * Returns the file size on the OSDs.
 *
 * @throws IOException
 */
public long getSizeOnOSD() throws IOException;
/**
 * Same as truncate(userCredentials, newFileSize) but with the option to truncate the file only at the
 * MRC.
 *
 * @param userCredentials
 *            Name and Groups of the user.
 * @param newFileSize
 *            New size of the file.
 * @param truncateOnlyAtMRC
 *            true if the file should be truncated only at the MRC, otherwise false.
 *
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 */
public void truncate(UserCredentials userCredentials, long newFileSize, boolean truncateOnlyAtMRC)
throws PosixErrorException, AddressToUUIDNotFoundException, IOException;
}

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2012 by Lukas Kairies, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
/**
 * Extends the default Volume with additional functions. An admin volume object can be obtained by opening
 * a volume with an admin client.
 */
public interface AdminVolume extends Volume {
/**
 * Opens a file and returns the pointer to an {@link AdminFileHandle} object.
 *
 * @param userCredentials
 *            Name and Groups of the user.
 * @param path
 *            Path to the file.
 * @param flags
 *            Open flags as specified in xtreemfs::pbrpc::SYSTEM_V_FCNTL.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public AdminFileHandle openFile(UserCredentials userCredentials, String path, int flags)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Same as previous openFile() except for the additional mode parameter, which sets the permissions for
 * the file in case SYSTEM_V_FCNTL_H_O_CREAT is specified as flag and the file will be created.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public AdminFileHandle openFile(UserCredentials userCredentials, String path, int flags, int mode)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns the object number for the file at "path".
 *
 * @param userCredentials
 *            Name and groups of the user.
 * @param path
 *            Path to the file.
 *
 * @throws IOException
 */
public long getNumObjects(UserCredentials userCredentials, String path) throws IOException;
/**
 * Same as unlink(userCredentials, path) but with the option to unlink the file only at the MRC.
 *
 * @param userCredentials
 *            Name and Groups of the user.
 * @param path
 *            Path to the file.
 * @param unlinkOnlyAtMrc
 *            true if the file should be unlinked only at the MRC, otherwise false.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public abstract void unlink(UserCredentials userCredentials, String path, boolean unlinkOnlyAtMrc)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
}

View File

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.writeRequest;
/**
 * Stores all information needed for an asynchronous write.
 *
 * Instances are immutable except for {@code osdUuid}, which the
 * {@code AsyncWriteHandler} updates on retries. Changes vs. the original:
 * fields are declared {@code final} where possible, fields precede the
 * constructors, and the stray semicolon after the class body was removed.
 */
public class AsyncWriteBuffer {
    /**
     * Additional information of the write request.
     */
    private final writeRequest             writeRequest;

    /**
     * Actual payload of the write request.
     */
    private final ReusableBuffer           data;

    /**
     * Length of the payload.
     */
    private final int                      dataLength;

    /**
     * FileHandle which did receive the write() command.
     */
    private final FileHandleImplementation fileHandle;

    /**
     * Set to false if the member "osdUuid" is used instead of the FileInfo's osdUuidIterator in order to
     * determine the OSD to be used.
     */
    private final boolean                  useUuidIterator;

    /**
     * UUID of the OSD which was used for the last retry or, if useUuidIterator is false, this variable is
     * initialized to the OSD to be used.
     */
    private String                         osdUuid;

    /**
     * Creates a new {@link AsyncWriteBuffer} which is using the osdUuidIterator from
     * {@link AsyncWriteHandler}.
     */
    protected AsyncWriteBuffer(writeRequest writeRequest, ReusableBuffer data, int dataLength,
            FileHandleImplementation fileHandle) {
        this.writeRequest = writeRequest;
        this.data = data;
        this.dataLength = dataLength;
        this.fileHandle = fileHandle;
        this.osdUuid = null;
        this.useUuidIterator = true;
    }

    /**
     * Creates a new {@link AsyncWriteBuffer} with its own osdUuid.
     */
    protected AsyncWriteBuffer(writeRequest writeRequest, ReusableBuffer data, int dataLength,
            FileHandleImplementation fileHandle, String osdUuid) {
        this.writeRequest = writeRequest;
        this.data = data;
        this.dataLength = dataLength;
        this.fileHandle = fileHandle;
        this.osdUuid = osdUuid;
        this.useUuidIterator = false;
    }

    protected writeRequest getWriteRequest() {
        return writeRequest;
    }

    protected ReusableBuffer getData() {
        return data;
    }

    protected int getDataLength() {
        return dataLength;
    }

    protected FileHandleImplementation getFileHandle() {
        return fileHandle;
    }

    protected boolean isUsingUuidIterator() {
        return useUuidIterator;
    }

    protected String getOsdUuid() {
        return osdUuid;
    }

    /**
     * Stores the OSD used for the current attempt so the response callback can report it.
     *
     * @param osdUuid UUID of the OSD this buffer was (or will be) sent to
     */
    protected void setOsdUuid(String osdUuid) {
        this.osdUuid = osdUuid;
    }
}

View File

@@ -0,0 +1,349 @@
package org.xtreemfs.common.libxtreemfs;
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.UUIDIteratorListIsEmpyException;
import org.xtreemfs.common.libxtreemfs.exceptions.XtreemFSException;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.client.PBRPCException;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.client.RPCResponseAvailableListener;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.ErrorType;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
import org.xtreemfs.pbrpc.generatedinterfaces.OSDServiceClient;
import com.google.protobuf.Descriptors.EnumValueDescriptor;
//JCIP import net.jcip.annotations.GuardedBy;
/**
* Handles asynchronous writes. Used only internally.
*/
public class AsyncWriteHandler {
/**
* Possible states of this object.
*/
private enum State {
IDLE, WRITES_PENDING
}
/**
* State of this object.
*/
// JCIP @GuardedBy("this")
private State state;
/**
* List of pending writes.
*/
// TODO(mberlin): Limit the size of writes in flight to avoid flooding.
// JCIP @GuardedBy("this")
private List<AsyncWriteBuffer> writesInFlight;
/**
* Number of pending bytes.
*/
// JCIP @GuardedBy("this")
private int pendingBytes;
/**
* Set by WaitForPendingWrites() to true if there are temporarily no new async writes allowed and will be
* set to false again once the state IDLE is reached.
*/
// JCIP @GuardedBy("this")
private boolean writingPaused;
/**
* Used to notify blocked WaitForPendingWrites() callers for the state change back to IDLE.
*/
private Object allPendingWritesDidComplete;
/**
* Number of threads blocked by WaitForPendingWrites() waiting on allPendingWritesDidComplete for a state
* change back to IDLE.
*/
// JCIP @GuardedBy("this")
private int waitingBlockingThreadsCount;
/**
* FileInfo object to which this AsyncWriteHandler does belong. Accessed for file size updates.
*/
private FileInfo fileInfo;
/**
* Pointer to the UUIDIterator of the FileInfo object.
*/
private UUIDIterator uuidIterator;
/**
* Required for resolving UUIDs to addresses.
*/
private UUIDResolver uuidResolver;
/**
* Client which is used to send out the writes.
*/
OSDServiceClient osdServiceClient;
/**
* Auth needed for ServiceClients. Always set to AUTH_NONE by Volume.
*/
private Auth authBogus;
/**
* For same reason needed as authBogus. Always set to user "xtreemfs".
*/
private UserCredentials userCredentialsBogus;
/**
* Maximum number in bytes which may be pending.
*/
private int maxWriteahead;
/**
* Maximum number of pending write requests.
*/
private int maxWriteaheadRequests;
/**
* Maximum number of attempts a write will be tried.
*/
private int maxWriteTries;
protected AsyncWriteHandler(FileInfo fileInfo, UUIDIterator uuidIterator, UUIDResolver uuidResolver,
OSDServiceClient osdServiceClient, Auth authBogus, UserCredentials userCredentialsBogus,
int maxWriteahead, int maxWriteaheadRequests, int maxWriteTries) {
this.fileInfo = fileInfo;
this.uuidIterator = uuidIterator;
this.uuidResolver = uuidResolver;
this.osdServiceClient = osdServiceClient;
this.authBogus = authBogus;
this.userCredentialsBogus = userCredentialsBogus;
this.maxWriteahead = maxWriteahead;
this.maxWriteaheadRequests = maxWriteaheadRequests;
this.maxWriteTries = maxWriteTries;
writesInFlight = new ArrayList<AsyncWriteBuffer>();
allPendingWritesDidComplete = new Object();
state = State.IDLE;
}
/**
* Adds writeBuffer to the list of pending writes and sends it to the OSD specified by
* writeBuffer.uuidIterator (or write_buffer.osdUuid if writeBuffer.useUuidIterator is false).
*
* Blocks if the number of pending bytes exceeds the maximum write-ahead or waitForPendingWrites() was
* called beforehand.
*/
protected void write(AsyncWriteBuffer writeBuffer) throws AddressToUUIDNotFoundException,
XtreemFSException {
assert (writeBuffer != null);
if (writeBuffer.getDataLength() > maxWriteahead) {
throw new XtreemFSException("The maximum allowed writeahead size: " + maxWriteahead
+ " is smaller than the size of this write request: " + writeBuffer.getDataLength());
}
// append to the list of write in flight
synchronized (this) {
while (writingPaused || (pendingBytes + writeBuffer.getDataLength()) > maxWriteahead
|| writesInFlight.size() == maxWriteaheadRequests) {
// TODO: Allow interruption and set the write status of the FileHandle of the
// interrupted write to an error state.
try {
this.wait();
} catch (InterruptedException e) {
// TODO: handle exception
}
}
increasePendingBytesHelper(writeBuffer);
}
String osdUuid = retrieveOSDUuidAndSetItInWriteBuffer(writeBuffer);
String osdAddress = uuidResolver.uuidToAddress(osdUuid);
InetSocketAddress osdInetSocketAddress =
Helper.stringToInetSocketAddress(osdAddress,
GlobalTypes.PORTS.OSD_PBRPC_PORT_DEFAULT.getNumber());
// Sending request
final AsyncWriteBuffer finalWriteBufferForCallback = writeBuffer;
RPCResponse<OSDWriteResponse> response;
try {
response =
osdServiceClient.write(osdInetSocketAddress, authBogus, userCredentialsBogus,
writeBuffer.getWriteRequest(), writeBuffer.getData());
response.registerListener(new RPCResponseAvailableListener<OSDWriteResponse>() {
@Override
public void responseAvailable(RPCResponse<OSDWriteResponse> r) {
try {
OSDWriteResponse osdWriteResponse = r.get();
writeFinished(osdWriteResponse, r.getData(), finalWriteBufferForCallback);
} catch (PBRPCException e) {
String errorTypeName = e.getErrorType().toString();
EnumValueDescriptor enumDescriptor =
ErrorType.getDescriptor().findValueByNumber(e.getErrorType().getNumber());
if (enumDescriptor != null) {
errorTypeName = enumDescriptor.getName();
}
Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, this,
"An async write sent to the server %s failed."
+ " Error type: %s Error message: %s Complete error header: %s",
finalWriteBufferForCallback.getOsdUuid(), errorTypeName, e.getErrorMessage(),
e.getDebugInfo());
System.out.println("CLASSNAME: " + this.toString());
decreasePendingBytesHelper(finalWriteBufferForCallback);
} catch (Exception e) {
Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, this, "asyncWrite:"
+ " failed due to the following reasons ", e.getMessage());
decreasePendingBytesHelper(finalWriteBufferForCallback);
} finally {
r.freeBuffers();
}
}
});
} catch (IOException e1) {
Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, this, "asyncWrite:"
+ " failed due to the following reasons ", e1.getMessage());
decreasePendingBytesHelper(finalWriteBufferForCallback);
}
}
private String retrieveOSDUuidAndSetItInWriteBuffer(AsyncWriteBuffer writeBuffer)
throws UUIDIteratorListIsEmpyException {
String osdUuid;
if (writeBuffer.isUsingUuidIterator()) {
osdUuid = uuidIterator.getUUID();
// Store used OSD in writeBuffer for the callback.
writeBuffer.setOsdUuid(osdUuid);
} else {
osdUuid = writeBuffer.getOsdUuid();
}
return osdUuid;
}
/**
* Blocks until state changes back to IDLE and prevents allowing new writes. by blocking further write()
* calls.
*/
protected void waitForPendingWrites() {
synchronized (this) {
if (state != State.IDLE) {
writingPaused = false;
waitingBlockingThreadsCount++;
} else {
return;
}
}
while (state != State.IDLE) {
synchronized (allPendingWritesDidComplete) {
try {
allPendingWritesDidComplete.wait();
} catch (InterruptedException e) {
// TODO: REALLY handle exception.
e.printStackTrace();
}
}
}
synchronized (this) {
waitingBlockingThreadsCount--;
}
}
/**
* Implements callback for an async write request.
*/
private void writeFinished(OSDWriteResponse response, ReusableBuffer data, AsyncWriteBuffer writeBuffer) {
// Tell FileInfo about the OSDWriteResponse.
if (response.hasSizeInBytes()) {
XCap xcap = writeBuffer.getFileHandle().getXcap();
fileInfo.tryToUpdateOSDWriteResponse(response, xcap);
}
decreasePendingBytesHelper(writeBuffer);
}
/**
* Helper function which adds "writeBuffer" to the list "writesInFlight", increases the number of pending
* bytes and takes care of state changes.
*/
protected void increasePendingBytesHelper(AsyncWriteBuffer writeBuffer) {
assert (writeBuffer != null);
pendingBytes += writeBuffer.getDataLength();
writesInFlight.add(writeBuffer);
assert (writesInFlight.size() <= maxWriteaheadRequests);
state = State.WRITES_PENDING;
}
/**
* Helper function which removes "writeBuffer" from the list "writesInFlight", deletes "writeBuffer",
* reduces the number of pending bytes and takes care of state changes.
*
* @remark Ownership of "writeBuffer" is transferred to the caller.
* @remark Requires a lock on "asyncWriteHandlerLock".
*/
private synchronized void decreasePendingBytesHelper(AsyncWriteBuffer writeBuffer) {
assert (writeBuffer != null);
writesInFlight.remove(writeBuffer);
pendingBytes -= writeBuffer.getDataLength();
if (pendingBytes == 0) {
state = State.IDLE;
if (writingPaused) {
writingPaused = false;
}
// Issue notifyAll() as long as there are remaining blocked threads.
//
// Please note the following here: After the two notifyAll()s on the
// condition variables "allPendingWritesDidComplete and
// pendingBytesWereDecreased, two different thread types
// (waiting blocked ones AND further waiting writes) do race for
// re-acquiring the lock on mutex_.
// Example:
// T1: write1 "state" = PENDING
// T2: getattr "writingPaused" = true => blocked as "state" != IDLE
// T1: write2 => blocked as "writingPaused" = true
// Tx: write1 callback: "state" = IDLE, writing_paused_ = false
// T1: write2 succeeds to obtain lock on mutex_ *before* getattr
// => state = IDLE (writing_paused_ remains false)
// Tx: write2 callback: state = IDLE, writing paused remains false
// - however its necessary to notify the blocked getattr.
// As you can see the order of concurrent writes and reads/getattrs
// is undefined and we don't enforce any order as it's up to the user to
// synchronize his threads himself when working on the same file.
if (waitingBlockingThreadsCount > 0) {
synchronized (allPendingWritesDidComplete) {
allPendingWritesDidComplete.notifyAll();
}
}
}
// Tell blocked writers there may be enough space/writing was unpaused now.
this.notifyAll();
}
}

View File

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.common.libxtreemfs.exceptions.VolumeNotFoundException;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.DIR.Service;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.AccessControlPolicyType;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.KeyValuePair;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicyType;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Volumes;
/**
* Provides methods to open, close, create, delete and list volumes and to instantiate a new client object, to
* start and shutdown a Client object.
*/
public interface Client {
/**
* Need to be called before client can be used. Initializes required stuff.
*/
public void start() throws Exception;
/**
* Same as start(), but add option to start Threads as daemons. Daemon threads are only used by the XtreemFSHadoopClient.
*
* @param startThreadsAsDaemons if true, all threads are as daemons.
*/
public void start(boolean startThreadsAsDaemons) throws Exception;
public void shutdown();
/**
* Open a volume and use the returned class to access it.
*
* @remark Ownership is NOT transferred to the caller. Instead Volume->Close() has to be called to destroy
* the object.
*
* @throws AddressToUUIDNotFoundException
* @throws VolumeNotFoundException
* @throws {@link IOException}
*/
public Volume openVolume(String volumeName, SSLOptions sslOptions, Options options)
throws AddressToUUIDNotFoundException, VolumeNotFoundException, IOException;
/**
* Creates a volume on the MRC at mrc_address using certain default values ( POSIX access policy type,
* striping size = 128k and width = 1 (i.e. no striping), mode = 777 and owner username and groupname
* retrieved from the user_credentials.
*
* @param mrcAddress
* String of the form "hostname:port".
* @param auth
* Authentication data, e.g. of type AUTH_PASSWORD.
* @param userCredentials
* Username and groups of the user who executes CreateVolume(). Not checked so far?
* @param volumeName
* Name of the new volume.
*
* @throws IOException
* @throws PosixErrorException
*/
public void createVolume(String mrcAddress, Auth auth, UserCredentials userCredentials, String volumeName)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Creates a volume on the MRC at mrc_address using certain default values ( POSIX access policy type,
* striping size = 128k and width = 1 (i.e. no striping), mode = 777 and owner username and groupname
* retrieved from the user_credentials.
*
* @param mrcAddresses
* List of Strings of the form "hostname:port".
* @param auth
* Authentication data, e.g. of type AUTH_PASSWORD.
* @param userCredentials
* Username and groups of the user who executes CreateVolume(). Not checked so far?
* @param volumeName
* Name of the new volume.
*
* @throws IOException
* @throws PosixErrorException
* @throws AddressToUUIDNotFoundException
*/
public void createVolume(List<String> mrcAddresses, Auth auth, UserCredentials userCredentials,
String volumeName) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Creates a volume on the first found MRC.
*
* @param auth
* Authentication data, e.g. of type AUTH_PASSWORD.
* @param userCredentials
* Username and groups of the user who executes CreateVolume().
* @param volumeName
* Name of the new volume.
* @param mode
* Mode of the volume's root directory (in octal representation (e.g. 511), not decimal (777)).
* @param ownerUsername
* Name of the owner user.
* @param ownerGroupname
* Name of the owner group.
* @param accessPolicyType
* Access policy type (Null, Posix, Volume, ...).
* @param defaultStripingPolicyType
* Only RAID0 so far.
* @param defaultStripeSize
* Size of an object on the OSD (in kBytes).
* @param defaultStripeWidth
* Number of OSDs objects of a file are striped across.
* @param volumeAttributes
* Reference to a list of key-value pairs of volume attributes which will bet set at creation
* time of the volume.
*
* @throws AddressToUUIDNotFoundException
* @throws PosixErrorException
* @throws IOException
*/
public void createVolume(Auth auth, UserCredentials userCredentials, String volumeName, int mode,
String ownerUsername, String ownerGroupname, AccessControlPolicyType accessPolicyType,
StripingPolicyType defaultStripingPolicyType, int defaultStripeSize, int defaultStripeWidth,
List<KeyValuePair> volumeAttributes) throws IOException, PosixErrorException,
AddressToUUIDNotFoundException;
/**
 * Creates a volume on the MRC at mrc_address.
 *
 * @param mrcAddress
 *            String of the form "hostname:port".
 * @param auth
 *            Authentication data, e.g. of type AUTH_PASSWORD.
 * @param userCredentials
 *            Username and groups of the user who executes CreateVolume().
 * @param volumeName
 *            Name of the new volume.
 * @param mode
 *            Mode of the volume's root directory (in octal representation (e.g. 511), not decimal (777)).
 * @param ownerUsername
 *            Name of the owner user.
 * @param ownerGroupname
 *            Name of the owner group.
 * @param accessPolicyType
 *            Access policy type (Null, Posix, Volume, ...).
 * @param defaultStripingPolicyType
 *            Only RAID0 so far.
 * @param defaultStripeSize
 *            Size of an object on the OSD (in kBytes).
 * @param defaultStripeWidth
 *            Number of OSDs objects of a file are striped across.
 * @param volumeAttributes
 *            Reference to a list of key-value pairs of volume attributes which will be set at creation
 *            time of the volume.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public void createVolume(String mrcAddress, Auth auth, UserCredentials userCredentials,
        String volumeName, int mode, String ownerUsername, String ownerGroupname,
        AccessControlPolicyType accessPolicyType, StripingPolicyType defaultStripingPolicyType,
        int defaultStripeSize, int defaultStripeWidth, List<KeyValuePair> volumeAttributes)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Creates a volume on the MRC at mrc_address.
 *
 * @param mrcAddresses
 *            List of Strings of the form "hostname:port". The first MRC that can be reached is used.
 * @param auth
 *            Authentication data, e.g. of type AUTH_PASSWORD.
 * @param userCredentials
 *            Username and groups of the user who executes CreateVolume().
 * @param volumeName
 *            Name of the new volume.
 * @param mode
 *            Mode of the volume's root directory (in octal representation (e.g. 511), not decimal (777)).
 * @param ownerUsername
 *            Name of the owner user.
 * @param ownerGroupname
 *            Name of the owner group.
 * @param accessPolicyType
 *            Access policy type (Null, Posix, Volume, ...).
 * @param defaultStripingPolicyType
 *            Only RAID0 so far.
 * @param defaultStripeSize
 *            Size of an object on the OSD (in kBytes).
 * @param defaultStripeWidth
 *            Number of OSDs objects of a file are striped across.
 * @param volumeAttributes
 *            Reference to a list of key-value pairs of volume attributes which will be set at creation
 *            time of the volume.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public void createVolume(List<String> mrcAddresses, Auth auth, UserCredentials userCredentials,
        String volumeName, int mode, String ownerUsername, String ownerGroupname,
        AccessControlPolicyType accessPolicyType, StripingPolicyType defaultStripingPolicyType,
        int defaultStripeSize, int defaultStripeWidth, List<KeyValuePair> volumeAttributes)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Deletes the volume "volume_name" at the MRC "mrc_address".
 *
 * @param mrcAddress
 *            String of the form "hostname:port".
 * @param auth
 *            Authentication data, e.g. of type AUTH_PASSWORD.
 * @param userCredentials
 *            Username and groups of the user who executes DeleteVolume().
 * @param volumeName
 *            Name of the volume to be deleted.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public void deleteVolume(String mrcAddress, Auth auth, UserCredentials userCredentials, String volumeName)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Deletes the volume "volume_name" at the MRC "mrc_address".
 *
 * @param mrcAddresses
 *            List of Strings of the form "hostname:port". The first MRC that can be reached is used.
 * @param auth
 *            Authentication data, e.g. of type AUTH_PASSWORD.
 * @param userCredentials
 *            Username and groups of the user who executes DeleteVolume().
 * @param volumeName
 *            Name of the volume to be deleted.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public void deleteVolume(List<String> mrcAddresses, Auth auth, UserCredentials userCredentials,
        String volumeName) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Retrieves the first MRC from the DIR and deletes the volume "volume_name" at this MRC.
 *
 * @param auth
 *            Authentication data, e.g. of type AUTH_PASSWORD.
 * @param userCredentials
 *            Username and groups of the user who executes DeleteVolume().
 * @param volumeName
 *            Name of the volume to be deleted.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public void deleteVolume(Auth auth, UserCredentials userCredentials, String volumeName)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns the available volumes on a MRC.
 *
 * @param mrcAddress
 *            String of the form "hostname:port".
 *
 * @return the {@link Volumes} registered at the given MRC.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public Volumes listVolumes(String mrcAddress) throws IOException, PosixErrorException,
        AddressToUUIDNotFoundException;
/**
 * Returns the available volumes on a MRC (the MRC is looked up via the DIR).
 *
 * @return the {@link Volumes} registered at the MRC.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 *
 * @remark Ownership of the return value is transferred to the caller.
 */
public Volumes listVolumes() throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns the available volumes as list of names.
 *
 * @return array with the names of all available volumes.
 *
 * @throws IOException
 */
public String[] listVolumeNames() throws IOException;
/**
 * Returns the available volumes on a MRC.
 *
 * @param mrcAddresses
 *            <String> List which contains MRC addresses of the form "hostname:port".
 *
 * @return the {@link Volumes} registered at the MRC.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 *
 * @remark Ownership of the return value is transferred to the caller.
 */
public Volumes listVolumes(List<String> mrcAddresses) throws IOException, PosixErrorException,
        AddressToUUIDNotFoundException;
/**
 * List all servers and return their host names.
 *
 * @return map from the host name of each server to its {@link Service} record.
 *
 * @throws IOException
 * @throws PosixErrorException
 */
public Map<String, Service> listServers() throws IOException, PosixErrorException;
/**
 * Returns the registered UUID of the OSDs and their attributes on the DIR.
 *
 * @return map from OSD UUID to the corresponding {@link Service} record (which carries the attributes).
 *
 * @throws IOException
 * @throws PosixErrorException
 */
public Map<String, Service> listOSDsAndAttributes() throws IOException, PosixErrorException;
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright (c) 2012 by Lukas Kairies, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
/**
 * Factory for the different kinds of {@link Client} implementations.
 */
public class ClientFactory {

    /**
     * Returns an instance of a default client with one DIR Address.
     *
     * @param dirServiceAddress
     *            Address of the DIR service (Format: ip-addr:port, e.g. localhost:32638)
     * @param userCredentials
     *            Name and Groups of the user.
     * @param sslOptions
     *            NULL if no SSL is used.
     * @param options
     *            Has to contain loglevel string and logfile path.
     */
    public static Client createClient(String dirServiceAddress, UserCredentials userCredentials,
            SSLOptions sslOptions, Options options) {
        // Delegate to the multi-address variant with a single-element array.
        return createClient(new String[] { dirServiceAddress }, userCredentials, sslOptions, options);
    }

    /**
     * Returns an instance of a default client with multiple DIR Addresses.
     *
     * @param dirServiceAddresses
     *            Addresses of the DIR services (Format: ip-addr:port, e.g. localhost:32638)
     * @param userCredentials
     *            Name and Groups of the user.
     * @param sslOptions
     *            NULL if no SSL is used.
     * @param options
     *            Has to contain loglevel string and logfile path.
     */
    public static Client createClient(String[] dirServiceAddresses, UserCredentials userCredentials,
            SSLOptions sslOptions, Options options) {
        return new ClientImplementation(dirServiceAddresses, userCredentials, sslOptions, options);
    }

    /**
     * Returns an instance of an admin client with one DIR address.
     *
     * @param dirServiceAddress
     *            Address of the DIR service (Format: ip-addr:port, e.g. localhost:32638)
     * @param userCredentials
     *            Name and Groups of the user.
     * @param sslOptions
     *            NULL if no SSL is used.
     * @param options
     *            Has to contain loglevel string and logfile path.
     */
    public static AdminClient createAdminClient(String dirServiceAddress, UserCredentials userCredentials,
            SSLOptions sslOptions, Options options) {
        // Delegate to the multi-address variant with a single-element array.
        return createAdminClient(new String[] { dirServiceAddress }, userCredentials, sslOptions, options);
    }

    /**
     * Returns an instance of an admin client with multiple DIR Addresses.
     *
     * @param dirServiceAddresses
     *            Addresses of the DIR services (Format: ip-addr:port, e.g. localhost:32638)
     * @param userCredentials
     *            Name and Groups of the user.
     * @param sslOptions
     *            NULL if no SSL is used.
     * @param options
     *            Has to contain loglevel string and logfile path.
     */
    public static AdminClient createAdminClient(String[] dirServiceAddresses,
            UserCredentials userCredentials, SSLOptions sslOptions, Options options) {
        return new ClientImplementation(dirServiceAddresses, userCredentials, sslOptions, options);
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.common.libxtreemfs.exceptions.UUIDNotInXlocSetException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.Lock;
/**
 * FileHandle represents an open file.
 */
public interface FileHandle {

    /**
     * Read from a file 'count' bytes starting at 'offset' into 'data'.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param data
     *            [out] Byte array to be filled with read data.
     * @param count
     *            Number of requested bytes.
     * @param offset
     *            Offset in bytes.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @return Number of bytes read.
     */
    public int read(UserCredentials userCredentials, byte[] data, int count, long offset)
            throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Read from a file 'count' bytes starting at 'offset' into 'data', writing into the array
     * beginning at 'dataOffset'.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param data
     *            [out] Byte array to be filled with read data.
     * @param dataOffset
     *            Offset in data array. This is the position of the first bytes in the data array that should
     *            be read.
     * @param count
     *            Number of requested bytes.
     * @param offset
     *            Offset in bytes. At this position in the file the data will be read.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @return Number of bytes read.
     */
    public int read(UserCredentials userCredentials, byte[] data, int dataOffset, int count, long offset)
            throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Write to a file 'count' bytes at file offset 'offset' from 'data'.
     *
     * @attention If asynchronous writes are enabled (which is the default unless the file was opened with
     *            O_SYNC or async writes were disabled globally), no possible write errors can be returned as
     *            write() does return immediately after putting the write request into the send queue instead
     *            of waiting until the result was received. In this case, only after calling flush() or
     *            close() occurred write errors are returned to the user.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param data
     *            [in] Byte array which contains data to be written.
     * @param count
     *            Number of bytes to be written from data.
     * @param offset
     *            Offset in bytes.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @return Number of bytes written (see @attention above).
     */
    public int write(UserCredentials userCredentials, byte[] data, int count, long offset)
            throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Write to a file 'count' bytes at file offset 'offset' from 'data', reading from the array
     * beginning at 'dataOffset'.
     *
     * @attention If asynchronous writes are enabled (which is the default unless the file was opened with
     *            O_SYNC or async writes were disabled globally), no possible write errors can be returned as
     *            write() does return immediately after putting the write request into the send queue instead
     *            of waiting until the result was received. In this case, only after calling flush() or
     *            close() occurred write errors are returned to the user.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param data
     *            [in] Byte array which contains data to be written.
     * @param dataOffset
     *            Offset in data array. This is the position of the first bytes in the data array that should
     *            be written.
     * @param count
     *            Number of bytes to be written from data.
     * @param offset
     *            Offset in bytes. At this position in the file the data will be written.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @return Number of bytes written (see @attention above).
     */
    public int write(UserCredentials userCredentials, byte[] data, int dataOffset, int count,
            long offset) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Flushes pending writes and file size updates (corresponds to a fsync() system call).
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     */
    public void flush() throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Truncates the file to "newFileSize" bytes.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param newFileSize
     *            New size of the file.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     **/
    public void truncate(UserCredentials userCredentials, long newFileSize) throws IOException,
            PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Retrieves the attributes of this file and returns the result as a {@link Stat} object.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     */
    public Stat getAttr(UserCredentials userCredentials) throws IOException, PosixErrorException,
            AddressToUUIDNotFoundException;

    /**
     * Sets a lock on the specified file region and returns the resulting Lock object.
     *
     * If the acquisition of the lock fails, PosixErrorException will be thrown and posix_errno() will return
     * POSIX_ERROR_EAGAIN.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param processId
     *            ID of the process to which the lock belongs.
     * @param offset
     *            Start of the region to be locked in the file.
     * @param length
     *            Length of the region.
     * @param exclusive
     *            shared/read lock (false) or write/exclusive (true)?
     * @param waitForLock
     *            if true, blocks until lock acquired.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @remark Ownership is transferred to the caller.
     */
    public Lock acquireLock(UserCredentials userCredentials, int processId, long offset,
            long length, boolean exclusive, boolean waitForLock) throws IOException, PosixErrorException,
            AddressToUUIDNotFoundException;

    /**
     * Checks if the requested lock does not result in conflicts. If true, the returned Lock object contains
     * the requested 'process_id' in 'client_pid', otherwise the Lock object is a copy of the conflicting
     * lock.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param processId
     *            ID of the process to which the lock belongs.
     * @param offset
     *            Start of the region to be locked in the file.
     * @param length
     *            Length of the region.
     * @param exclusive
     *            shared/read lock (false) or write/exclusive (true)?
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     *
     * @remark Ownership is transferred to the caller.
     */
    public Lock checkLock(UserCredentials userCredentials, int processId, long offset, long length,
            boolean exclusive) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Releases the lock on the given file region.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param processId
     *            ID of the process to which the lock belongs.
     * @param offset
     *            Start of the region to be locked in the file.
     * @param length
     *            Length of the region.
     * @param exclusive
     *            shared/read lock (false) or write/exclusive (true)?
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     */
    public void releaseLock(UserCredentials userCredentials, int processId, long offset,
            long length, boolean exclusive) throws IOException, PosixErrorException,
            AddressToUUIDNotFoundException;

    /**
     * Releases "lock" (parameters given in Lock object).
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param lock
     *            Lock to be released.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     */
    public void releaseLock(UserCredentials userCredentials, Lock lock) throws IOException,
            PosixErrorException, AddressToUUIDNotFoundException;

    /**
     * Releases the lock possibly hold by "processId". Use this before closing a file to ensure POSIX
     * semantics:
     *
     * "All locks associated with a file for a given process shall be removed when a file descriptor for that
     * file is closed by that process or the process holding that file descriptor terminates."
     * (http://pubs.opengroup.org/onlinepubs/009695399/functions/fcntl.html)
     *
     * @param processId
     *            ID of the process whose lock shall be released.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     * @throws PosixErrorException
     */
    public void releaseLockOfProcess(int processId) throws IOException, PosixErrorException,
            AddressToUUIDNotFoundException;

    /**
     * Triggers the replication of the replica on the OSD with the UUID "osd_uuid" if the replica is a full
     * replica (and not a partial one).
     *
     * The Replica had to be added beforehand and "osd_uuid" has to be included in the XlocSet of the file.
     *
     * @param userCredentials
     *            Name and Groups of the user.
     * @param osdUuid
     *            UUID of the OSD where the replica is located.
     *
     * @throws AddressToUUIDNotFoundException
     * @throws IOException
     *             also thrown as an {@link UUIDNotInXlocSetException} if "osdUuid" is not part of the
     *             file's XlocSet.
     */
    public void pingReplica(UserCredentials userCredentials, String osdUuid) throws IOException,
            AddressToUUIDNotFoundException;

    /**
     * Closes the open file handle (flushing any pending data).
     *
     * @attention Please execute ReleaseLockOfProcess() first if there are multiple open file handles for the
     *            same file and you want to ensure the POSIX semantics that with the close of a file handle
     *            the lock (XtreemFS allows only one per tuple (client UUID, Process ID)) of the process will
     *            be closed. If you do not care about this, you don't have to release any locks on your own as
     *            all locks will be automatically released if the last open file handle of a file will be
     *            closed.
     *
     * @throws IOException
     */
    public void close() throws IOException;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,667 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.common.libxtreemfs.exceptions.XtreemFSException;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XLocSet;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.getattrRequest;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.Lock;
//JCIP import net.jcip.annotations.GuardedBy;
/**
* Stores metadata of the the file as well as the current size. One FileInfo is responsible for all open FileHandles.
*/
public class FileInfo {
/**
 * Different states regarding osdWriteResponse and its write back.
 *
 * kClean: no pending file size update; kDirty: a newer OSDWriteResponse is stored but not yet sent
 * to the MRC; kDirtyAndAsyncPending / kDirtyAndSyncPending: a write-back is currently in flight
 * (asynchronously resp. synchronously).
 */
enum FilesizeUpdateStatus {
    kClean, kDirty, kDirtyAndAsyncPending, kDirtyAndSyncPending
};
/**
* Volume which did open this file.
*/
private VolumeImplementation volume;
/**
* XtreemFS File ID of this file (does never change).
*/
long fileId;
/**
* Path of the File, used for debug output and writing back the OSDWriteResponse to the MetadataCache.
*/
// JCIP @GuardedBy("pathLock")
private String path;
/**
* Used to protect "path".
*/
private final Object pathLock;
/**
* Extracted from the FileHandle's XCap: true if an explicit close() has to be send to the MRC in order to
* trigger the on close replication.
*/
boolean replicateOnClose;
/**
* Number of file handles which hold a pointer on this object.
*/
private AtomicInteger referenceCount;
/**
* List of corresponding OSDs.
*/
// JCIP @GuardedBy("xLocSetLock")
private XLocSet xlocset;
/**
* Use this to protect "xlocset" and "replicateOnClose".
*/
Object xLocSetLock;
/**
* Use this to protect the renewal of xLocSets.
*/
Object xLocSetRenewalLock;
/**
* UUIDIterator which contains the UUIDs of all replicas.
*
* If striping is used, replication is not possible. Therefore, for striped files the UUID Iterator will
* contain only the head OSD.
*/
private UUIDIterator osdUuidIterator;
/**
* List of active locks (acts as a cache). The OSD allows only one lock per (client UUID, PID) tuple.
*/
private ConcurrentHashMap<Integer, Lock> activeLocks;
/**
* Random UUID of this client to distinguish them while locking.
*/
private String clientUuid;
/**
* List of open FileHandles for this file.
*/
private ConcurrentLinkedQueue<FileHandleImplementation> openFileHandles;
/**
* List of open FileHandles which solely exist to propagate a pending file size update (a OSDWriteResponse
* object) to the MRC.
*
* This extra list is needed to distinguish between the regular file handles (see open_file_handles_) and
* the ones used for file size updates. The intersection of both lists is empty.
*/
// JCIP @GuardedBy("osdWriteResponseLock")
private List<FileHandle> pendingFilesizeUpdates;
/**
* Pending file size update after a write() operation, may be NULL.
*
* If osdWriteResponse != NULL, the fileSize and "truncateEpoch" of the referenced
* {@link OSDWriteResponse} have to be respected, e.g. when answering a {@link getattrRequest}. This
* "osdWriteResponse" also corresponds to the "maximum" of all known OSDWriteReponses. The maximum has the
* highest "truncateEpoch", or if equal compared to another response, the higher "sizeInBytes" value.
*/
// JCIP @GuardedBy("osdWriteResponseLock")
private OSDWriteResponse osdWriteResponse;
/**
* Denotes the state of the stored "osdWriteResponse" object.
*/
// JCIP @GuardedBy("osdWriteResponseLock")
private FilesizeUpdateStatus osdWriteResponseStatus;
/**
* XCap required to send an OSDWriteResponse to the MRC.
*/
// JCIP @GuardedBy("osdWriteResponseLock")
private XCap osdWriteResponseXcap;
/**
* Always lock to access "osdWriteResponse", "osdWriteResponseStatus", "osdWriteResponseXcap" or
* "pendingFilesizeUpdates".
*/
private final Object osdWriteResponseLock;
/**
* Proceeds async writes, handles the callbacks and provides a waitForPendingWrites() method for barrier
* operations like read.
*/
AsyncWriteHandler asyncWriteHandler;
/**
 * Creates the FileInfo for the file identified by "fileId".
 *
 * @param volume
 *            Volume through which this file was opened.
 * @param fileId
 *            XtreemFS File ID of this file (does never change).
 * @param path
 *            Path of the file at open time (used for debug output and MetadataCache write-back).
 * @param replicateOnClose
 *            true if an explicit close() has to be sent to the MRC to trigger on-close replication.
 * @param xlocset
 *            XLocSet listing the file's replicas.
 * @param clientUuid
 *            Random UUID of this client, used to distinguish lock owners.
 */
public FileInfo(VolumeImplementation volume, long fileId, String path, boolean replicateOnClose,
        XLocSet xlocset, String clientUuid) {
    this.volume = volume;
    this.fileId = fileId;
    this.path = path;
    this.replicateOnClose = replicateOnClose;
    this.xlocset = xlocset;
    this.clientUuid = clientUuid;
    referenceCount = new AtomicInteger(0);
    osdWriteResponse = null;
    osdWriteResponseStatus = FilesizeUpdateStatus.kClean;
    osdWriteResponseLock = new Object();
    pathLock = new Object();
    xLocSetLock = new Object();
    xLocSetRenewalLock = new Object();
    openFileHandles = new ConcurrentLinkedQueue<FileHandleImplementation>();
    activeLocks = new ConcurrentHashMap<Integer, Lock>();
    // Add the UUIDs of all replicas to the UUID Iterator.
    osdUuidIterator = new UUIDIterator();
    osdUuidIterator.addUUIDs(Helper.getOSDUUIDsFromXlocSet(xlocset));
    asyncWriteHandler = new AsyncWriteHandler(this, osdUuidIterator, volume.getUUIDResolver(),
            volume.getOsdServiceClient(), volume.getAuthBogus(), volume.getUserCredentialsBogus(), volume
                    .getOptions().getMaxWriteahead(), volume.getOptions().getMaxWriteaheadRequests(),
            volume.getOptions().getMaxWriteTries());
    // Sized to the write-ahead limit: at most one pending size update per queued async write.
    pendingFilesizeUpdates = new ArrayList<FileHandle>(volume.getOptions().getMaxWriteahead());
}
/**
 * Create a copy of "newXlocSet" and save it to the "xlocset" member. "replicateOnClose" will be saved in
 * the corresponding member, too.
 *
 * @param newXlocset
 *            XlocSet that will be copied and set.
 * @param replicateOnClose
 *            true if replicate on close is used. false otherwise.
 */
protected void updateXLocSetAndRest(XLocSet newXlocset, boolean replicateOnClose) {
    synchronized (xLocSetLock) {
        // Store a defensive copy so later mutations of "newXlocset" cannot affect us.
        xlocset = XLocSet.newBuilder(newXlocset).build();
        this.replicateOnClose = replicateOnClose;
    }
    // Update the osdUuidIterator to reflect the changes in the xlocset.
    osdUuidIterator.clearAndAddUUIDs(Helper.getOSDUUIDsFromXlocSet(newXlocset));
}
/**
 * Same as {@link FileInfo#updateXLocSetAndRest(XLocSet, boolean)}, but leaves the
 * "replicateOnClose" member untouched.
 *
 * @param newXlocset
 *            XlocSet that will be copied and set.
 */
protected void updateXLocSetAndRest(XLocSet newXlocset) {
    synchronized (xLocSetLock) {
        // Store a defensive copy so later mutations of "newXlocset" cannot affect us.
        xlocset = XLocSet.newBuilder(newXlocset).build();
    }
    // Update the osdUuidIterator to reflect the changes in the xlocset.
    osdUuidIterator.clearAndAddUUIDs(Helper.getOSDUUIDsFromXlocSet(newXlocset));
}
/**
 * Returns a new FileHandle object to which xcap belongs.
 *
 * Convenience overload of {@link #createFileHandle(XCap, boolean, boolean)} with
 * usedForPendingFilesizeUpdate = false, i.e. the handle is registered as a regular open handle.
 */
FileHandleImplementation createFileHandle(XCap xcap, boolean asyncWritesEnabled) {
    return createFileHandle(xcap, asyncWritesEnabled, false);
}
/**
 * See createFileHandle(xcap). Does not add fileHandle to list of open file handles if
 * usedForPendingFilesizeUpdate=true.
 *
 * This function will be used if a FileHandle was solely created to asynchronously write back a dirty file
 * size update (osdWriteResponse).
 *
 * NOTE(review): the "usedForPendingFilesizeUpdate" flag is currently not consulted below — the handle is
 * always added to "openFileHandles"; callers for file size updates additionally track it in
 * "pendingFilesizeUpdates". TODO confirm this is intended.
 *
 * @remark Ownership is transferred to the caller.
 */
FileHandleImplementation createFileHandle(XCap xcap, boolean asyncWritesEnabled,
        boolean usedForPendingFilesizeUpdate) {
    FileHandleImplementation fileHandleImplementation = new FileHandleImplementation(volume, clientUuid, this,
            xcap, volume.getMrcUuidIterator(), osdUuidIterator, volume.getUUIDResolver(),
            volume.getMrcServiceClient(), volume.getOsdServiceClient(), volume.getStripeTranslators(),
            asyncWritesEnabled, volume.getOptions(), volume.getAuthBogus(),
            volume.getUserCredentialsBogus());
    // increase reference count and add it to openFileHandles
    referenceCount.incrementAndGet();
    openFileHandles.add(fileHandleImplementation);
    return fileHandleImplementation;
}
/**
 * Deregisters a closed FileHandle. Called by FileHandle.close().
 *
 * @param fileHandle
 *            the handle that was closed; must have flushed its pending writes already.
 */
protected void closeFileHandle(FileHandleImplementation fileHandle) {
    // Pending async writes and file size updates have already been flushed
    // by fileHandle.
    // remove file handle.
    openFileHandles.remove(fileHandle);
    // Decreasing reference count is handled by Volume.closeFile(), which may also
    // dispose this FileInfo once the count reaches zero.
    volume.closeFile(fileId, this, fileHandle);
}
/**
 * Atomically decrements the number of FileHandle references on this object and
 * returns the value after the decrement (never negative).
 */
protected int decreaseReferenceCount() {
    final int remaining = referenceCount.decrementAndGet();
    assert (remaining >= 0);
    return remaining;
}
/**
 * Returns a defensive copy of the pending "osdWriteResponse", or null if there is none.
 */
protected OSDWriteResponse getOSDWriteResponse() {
    synchronized (osdWriteResponseLock) {
        return (osdWriteResponse == null) ? null : osdWriteResponse.toBuilder().build();
    }
}
/**
 * Returns the current path of this file (thread-safe snapshot).
 */
protected String getPath() {
    final String currentPath;
    synchronized (pathLock) {
        currentPath = path;
    }
    return currentPath;
}
/**
 * Changes the stored path to "newPath", but only if the current path still equals "path"
 * (i.e. the rename has not been superseded by another rename in the meantime).
 */
protected void renamePath(String path, String newPath) {
    synchronized (pathLock) {
        if (!this.path.equals(path)) {
            return; // Stored path changed concurrently; nothing to do.
        }
        this.path = newPath;
    }
}
/**
 * Compares "response" against the current "osdWriteResponse". If "response" is newer, it is stored
 * (together with "xcap", which is needed to later send the response to the MRC in the background) and
 * true is returned; otherwise the stored state is left untouched and false is returned.
 */
protected boolean tryToUpdateOSDWriteResponse(OSDWriteResponse response, XCap xcap) {
    assert (response != null);
    synchronized (osdWriteResponseLock) {
        final boolean responseIsNewer = Helper.compareOSDWriteResponses(response, osdWriteResponse) == 1;
        if (responseIsNewer) {
            // Keep defensive copies and mark the stored response as dirty (not yet written back).
            osdWriteResponse = response.toBuilder().build();
            osdWriteResponseXcap = xcap.toBuilder().build();
            osdWriteResponseStatus = FilesizeUpdateStatus.kDirty;
        }
        return responseIsNewer;
    }
}
/**
 * Merge into a possibly outdated Stat object (e.g. from the StatCache) the current file size and
 * truncateEpoch from a stored OSDWriteResponse (if the stored response is newer than the Stat).
 */
protected Stat mergeStatAndOSDWriteResponse(Stat stat) {
    synchronized (osdWriteResponseLock) {
        if (osdWriteResponse == null) {
            return stat; // Nothing pending, Stat is authoritative.
        }
        // The stored response wins if it belongs to a later truncate epoch, or to the
        // same epoch with a larger file size.
        final boolean olderEpoch = stat.getTruncateEpoch() < osdWriteResponse.getTruncateEpoch();
        final boolean sameEpochSmallerSize =
                (stat.getTruncateEpoch() == osdWriteResponse.getTruncateEpoch())
                        && (stat.getSize() < osdWriteResponse.getSizeInBytes());
        if (olderEpoch || sameEpochSmallerSize) {
            stat = stat.toBuilder().setSize(osdWriteResponse.getSizeInBytes())
                    .setTruncateEpoch(osdWriteResponse.getTruncateEpoch()).build();
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "getattr: merged infos from osdWriteResponse, size: %s", stat.getSize());
            }
        }
    }
    return stat;
}
/**
 * Sends pending file size updates to the MRC asynchronously.
 *
 * Creates a temporary FileHandle (tracked in "pendingFilesizeUpdates") solely to write back the
 * dirty "osdWriteResponse"; the callback ends up in asyncFileSizeUpdateResponseHandler().
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
protected void writeBackFileSizeAsync() throws IOException, PosixErrorException,
        AddressToUUIDNotFoundException {
    synchronized (osdWriteResponseLock) {
        // Only update pending file size updates (state kDirty; kDirtyAndAsyncPending means a
        // write-back is already in flight).
        if (osdWriteResponse != null && osdWriteResponseStatus == FilesizeUpdateStatus.kDirty) {
            FileHandleImplementation fileHandle = createFileHandle(osdWriteResponseXcap, false, true);
            pendingFilesizeUpdates.add(fileHandle);
            osdWriteResponseStatus = FilesizeUpdateStatus.kDirtyAndAsyncPending;
            fileHandle.setOsdWriteResponseForAsyncWriteBack(osdWriteResponse);
            fileHandle.writeBackFileSizeAsync();
        }
    }
}
/**
 * Renews the XCap of all open file handles of this file asynchronously.
 *
 * A failure to renew one handle's XCap is only logged and does not abort the renewal of the
 * remaining handles (previously the try/catch wrapped the whole loop, so the first
 * IOException skipped every handle after it).
 *
 * @throws AddressToUUIDNotFoundException
 */
protected void renewXCapsAsync() throws AddressToUUIDNotFoundException {
    Iterator<FileHandleImplementation> fhiIterator = openFileHandles.iterator();
    while (fhiIterator.hasNext()) {
        try {
            fhiIterator.next().renewXCapAsync();
        } catch (IOException ioe) {
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "renewXCapsAsync: Failed to renew XCap for a fileHandle. Reason: %s",
                        ioe.getCause());
            }
        }
    }
}
/**
 * Releases all locks of processId using fileHandle to issue ReleaseLock().
 *
 * Best-effort: an IOException from the OSD is logged and swallowed, mirroring POSIX close()
 * semantics where lock cleanup must not fail the caller.
 *
 * @param fileHandle
 *            handle used to send the release request.
 * @param processId
 *            ID of the process whose cached lock (if any) shall be released.
 */
protected void releaseLockOfProcess(FileHandleImplementation fileHandle, int processId)
        throws PosixErrorException, AddressToUUIDNotFoundException {
    Lock lock = activeLocks.get(processId);
    if (lock != null) {
        try {
            // releaseLock deletes Lock from activeLocks
            fileHandle.releaseLock(lock);
        } catch (IOException ioe) {
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "releaseLock: Failed to release Lock for processID: %s. Reason: %s", processId,
                        ioe.getCause());
            }
        }
    }
}
/**
 * Uses fileHandle to release all known local locks.
 *
 * Best-effort: an IOException for one lock is logged and the remaining locks are still released.
 *
 * @param fileHandle
 *            handle used to send the release requests.
 */
protected void releaseAllLocks(FileHandleImplementation fileHandle) throws PosixErrorException,
        AddressToUUIDNotFoundException {
    for (Lock lock : activeLocks.values()) {
        // FileHandleImplementation.releaseLock(lock) will delete the lock
        // from activeLocks.
        try {
            fileHandle.releaseLock(lock);
        } catch (IOException ioe) {
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "releaseAllLocks: Failed to release for some Locks. Reason: %s", ioe.getCause());
            }
        }
    }
}
/**
 * Blocks until all asynchronous file size updates are completed
 * (i.e. "pendingFilesizeUpdates" is empty; see asyncFileSizeUpdateResponseHandler(), which
 * notifies waiters on "osdWriteResponseLock").
 *
 * If the waiting thread is interrupted, the method returns early and the thread's interrupt
 * status is restored so callers further up the stack can observe the interruption.
 */
protected void waitForPendingFileSizeUpdates() {
    synchronized (osdWriteResponseLock) {
        while (pendingFilesizeUpdates.size() > 0) {
            try {
                osdWriteResponseLock.wait();
            } catch (InterruptedException e) {
                // Re-set the interrupt flag instead of silently swallowing it.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}
/**
* Called by the file size update callback of FileHandle.
*/
protected void asyncFileSizeUpdateResponseHandler(OSDWriteResponse owr,
FileHandleImplementation fileHandle, boolean success) {
synchronized (osdWriteResponseLock) {
// Only change the status of the OSDWriteResponse has not changed
// meanwhile.
if (Helper.compareOSDWriteResponses(owr, osdWriteResponse) == 0) {
// The status must not have changed.
assert (osdWriteResponseStatus == FilesizeUpdateStatus.kDirtyAndAsyncPending);
if (success) {
osdWriteResponseStatus = FilesizeUpdateStatus.kClean;
} else {
osdWriteResponseStatus = FilesizeUpdateStatus.kDirty; // Still dirty.
}
}
// Always remove the temporary FileHandle.
pendingFilesizeUpdates.remove(fileHandle);
decreaseReferenceCount();
if (pendingFilesizeUpdates.size() == 0) {
osdWriteResponseLock.notifyAll();
}
}
}
/**
* Compares "lock" against list of active locks.
*
* Returns a {@link Tupel} where the first elements is the conflicting lock if such exits or null. The
* second element is a array of three boolean value in the following order: conflictFound,
* lockForPidCached, cachedLockForPidEqual.
*
* conflictFound is set to true and the conflicting, active lock return if there is a conflict. If no
* conflict was found, lockForPidCached is set to true if there exists already a lock for
* lock.getClientPid(). Additionally, cachedLockForPidEqual" will be set to true, lock is equal to the
* lock active for this pid.
*
*
* @param lock
* The {@link Lock} the locks in the list of active locks should be compared with.
*
* @return A Tupel<Lock, boolean[]>.
*/
protected Tupel<Lock, boolean[]> checkLock(Lock lock) {
assert (lock.getClientUuid().equals(clientUuid));
boolean conflictFound = false;
boolean lockForPidCached = false;
boolean cachedLockForPidEqual = false;
Lock conflictionLock = null;
for (Entry<Integer, Lock> entry : activeLocks.entrySet()) {
if (entry.getKey() == lock.getClientPid()) {
lockForPidCached = true;
if (Helper.checkIfLocksAreEqual(lock, entry.getValue())) {
cachedLockForPidEqual = true;
}
continue;
}
if (Helper.checkIfLocksDoConflict(lock, entry.getValue())) {
conflictFound = true;
conflictionLock = entry.getValue();
// A conflicting lock has a higher priority than a cached lock with the same PID.
break;
}
}
boolean[] bools = new boolean[] { conflictFound, lockForPidCached, cachedLockForPidEqual };
return new Tupel<Lock, boolean[]>(conflictionLock, bools);
}
    /**
     * Returns true if a lock for "processId" is known.
     *
     * @param processId
     *            client PID whose lock ownership is checked.
     * @return true iff the list of active locks contains an entry for processId.
     */
    protected boolean checkIfProcessHasLocks(int processId) {
        return activeLocks.containsKey(processId);
    }
/**
* Add a copy of "lock" to list of active locks.
*/
protected void putLock(Lock lock) {
assert (lock.getClientUuid().equals(clientUuid));
// Delete lock if it already exists.
activeLocks.remove(lock.getClientPid());
// Insert copy of lock.
Lock newLock = lock.toBuilder().build();
activeLocks.put(newLock.getClientPid(), newLock);
}
    /**
     * Removes the active lock registered for lock.getClientPid() (if any) from
     * the list of active locks.
     */
    protected void delLock(Lock lock) {
        assert (lock.getClientUuid().equals(clientUuid));
        // There is only up to one element per PID. Just find and delete it.
        activeLocks.remove(lock.getClientPid());
    }
    /**
     * Flushes pending async writes and file size updates.
     *
     * @param fileHandle
     *            FileHandle used to write back a pending file size update.
     * @throws IOException
     *             propagated from the file size write-back.
     */
    protected void flush(FileHandleImplementation fileHandle) throws IOException, PosixErrorException,
            AddressToUUIDNotFoundException {
        // Delegates to flush(fileHandle, closeFile) with closeFile == false.
        flush(fileHandle, false);
    }
    /**
     * Same as Flush(), takes special actions if called by FileHandle.close().
     *
     * @param closeFile
     *            true if this flush is part of closing the file (passed through to the
     *            file size write-back).
     */
    protected void flush(FileHandleImplementation fileHandle, boolean closeFile) throws IOException,
            PosixErrorException, AddressToUUIDNotFoundException {
        // We don't wait only for fileHandle's pending writes but for all writes of this file.
        waitForPendingAsyncWrites();
        flushPendingFileSizeUpdate(fileHandle, closeFile);
    }
    /**
     * Flushes a pending file size update.
     *
     * @param fileHandle
     *            FileHandle used to write back the pending file size update.
     */
    protected void flushPendingFileSizeUpdate(FileHandleImplementation fileHandle) throws IOException,
            PosixErrorException, AddressToUUIDNotFoundException {
        // Delegates to the private overload with closeFile == false.
        flushPendingFileSizeUpdate(fileHandle, false);
    }
    /**
     * Calls asyncWriteHandler.write().
     *
     * @param writeBuffer
     *            buffered write request handed over to the AsyncWriteHandler.
     * @throws XtreemFSException
     *             propagated from AsyncWriteHandler.write().
     */
    void asyncWrite(AsyncWriteBuffer writeBuffer) throws XtreemFSException {
        asyncWriteHandler.write(writeBuffer);
    }
    /**
     * Calls asyncWriteHandler.waitForPendingWrites() (resulting in blocking until all pending async writes
     * are finished).
     */
    protected void waitForPendingAsyncWrites() {
        asyncWriteHandler.waitForPendingWrites();
    }
/**
* Same as flushPendingFileSizeUpdate(), takes special actions if called by close().
*
* @throws IOException
* @throws PosixErrorException
*/
private void flushPendingFileSizeUpdate(FileHandleImplementation fileHandle, boolean closeFile)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException {
// File size write back.
OSDWriteResponse responseCopy = null;
synchronized (osdWriteResponseLock) {
if (osdWriteResponse != null) {
waitForPendingFileSizeUpdates();
if (osdWriteResponseStatus == FilesizeUpdateStatus.kDirty) {
osdWriteResponseStatus = FilesizeUpdateStatus.kDirtyAndSyncPending;
// Create a copy of OSDWriteResponse to pass to FileHandle.
responseCopy = osdWriteResponse.toBuilder().build();
} else {
return;
}
} else {
return;
}
}
try {
fileHandle.writeBackFileSize(responseCopy, closeFile);
} catch (IOException e) {
osdWriteResponseStatus = FilesizeUpdateStatus.kDirty;
throw e;
}
synchronized (osdWriteResponseLock) {
// Only update the status if the response object has not changed meanwhile.
if (Helper.compareOSDWriteResponses(osdWriteResponse, responseCopy) == 0) {
osdWriteResponseStatus = FilesizeUpdateStatus.kClean;
}
boolean replicateOnCloseValue = false;
synchronized (xLocSetLock) {
replicateOnCloseValue = this.replicateOnClose;
}
if (closeFile && replicateOnCloseValue) {
// Send an explicit close only if the on-close-replication should be
// triggered. Use an empty OSDWriteResponse object therefore.
fileHandle.writeBackFileSize(OSDWriteResponse.getDefaultInstance(), closeFile);
}
}
}
    /**
     * Returns a new copy of xlocSet.
     *
     * @return a fresh copy (built via the protobuf Builder) of the current
     *         XLocSet, taken while holding xLocSetLock.
     */
    protected XLocSet getXLocSet() {
        synchronized (xLocSetLock) {
            return xlocset.toBuilder().build();
        }
    }
}

View File

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDSelectionPolicyType;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.OSDWriteResponse;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.SYSTEM_V_FCNTL;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XCap;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.XLocSet;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.OSD.Lock;
/**
 * Some static Helper functions which are used internally by different classes.
 */
public class Helper {
    /**
     * Generates a pseudorandom UUID with 32 chars length.
     *
     * @return String The pseudorandom UUID of length 32.
     *
     */
    // TODO: Ask michael why his uuid has length 36. oO
    public static String generateVersion4UUID() {
        // Base62 characters for UUID generation.
        char[] set = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789".toCharArray();
        int[] blockLength = { 8, 4, 4, 4, 12 };
        Random generator = new Random();
        // Build the UUID in a StringBuilder instead of concatenating Strings
        // in a loop (the old code allocated one intermediate String per char).
        StringBuilder uuid = new StringBuilder(32);
        for (int i = 0; i < blockLength.length; i++) {
            for (int j = 0; j < blockLength[i]; j++) {
                // Choose a pseudorandom, uniformly distributed char from the
                // array "set" and append it to the uuid.
                uuid.append(set[generator.nextInt(set.length)]);
            }
        }
        assert (uuid.length() == 32);
        // TODO: Avoid this stupid new Helper()
        if (Logging.isDebug()) {
            Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, new Helper(), "Generated client UUID: %s",
                    uuid.toString());
        }
        return uuid.toString();
    }

    /**
     * Converts an address in format "hostname:port" to an InetSocketAddress.
     *
     * @param address
     *            String which represents the address in format "hostname:port".
     * @param defaultPort
     *            Port that is used if address String is just a hostname.
     * @return the parsed InetSocketAddress.
     */
    static protected InetSocketAddress stringToInetSocketAddress(String address, int defaultPort) {
        int pos = address.indexOf(':');
        if (pos == -1) {
            // No port given, fall back to the default port.
            return new InetSocketAddress(address, defaultPort);
        }
        return new InetSocketAddress(address.substring(0, pos),
                Integer.parseInt(address.substring(pos + 1)));
    }

    /**
     * Returns the UUID of the OSD which stores the object with number "objectNo" of the replica "replica".
     *
     * @param replica
     *            replica whose striping policy determines the responsible OSD.
     * @param objectNo
     *            number of the object in question.
     * @return the OSD UUID.
     */
    static public String getOSDUUIDFromObjectNo(Replica replica, long objectNo) {
        // Compute the modulo on the long value BEFORE casting to int: the
        // former "(int) objectNo % width" truncated objectNo first, which
        // could yield a negative index for very large object numbers.
        return replica.getOsdUuids((int) (objectNo % replica.getStripingPolicy().getWidth()));
    }

    /**
     * Returns the UUID of the OSD at position "stripeIndex" of replica "replicaIndex" in "xlocs",
     * or "" (with an error logged) if the XLocSet has no replicas or the replica has no OSDs.
     */
    static public String getOSDUUIDFromXlocSet(XLocSet xlocs, int replicaIndex, int stripeIndex) {
        if (xlocs.getReplicasCount() == 0) {
            Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, xlocs,
                    "getOSDUUIDFromXlocSet: Empty replicas list in XlocSet: %s", xlocs.toString());
            return "";
        }
        Replica replica = xlocs.getReplicas(replicaIndex);
        if (replica.getOsdUuidsCount() == 0) {
            Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, xlocs,
                    "GetOSDUUIDFromXlocSet: No head OSD available in XlocSet: %s", xlocs.toString());
            return "";
        }
        return replica.getOsdUuids(stripeIndex);
    }

    /** Returns the UUID of the head OSD of the first replica in "xlocs". */
    static public String getOSDUUIDFromXlocSet(XLocSet xlocs) {
        // Get the UUID for the first replica (r=0) and the head OSD (i.e. the
        // first stripe, s=0).
        return getOSDUUIDFromXlocSet(xlocs, 0, 0);
    }

    /**
     * Creates a list containing the UUIDs for the head OSD of every replica in the XLocSet.
     */
    public static List<String> getOSDUUIDsFromXlocSet(XLocSet xlocs) {
        List<String> uuids = new ArrayList<String>(xlocs.getReplicasCount());
        for (int i = 0; i < xlocs.getReplicasCount(); i++) {
            uuids.add(xlocs.getReplicas(i).getOsdUuids(0));
        }
        return uuids;
    }

    /**
     * Extracts the numeric (volume-local) part of the file ID from "xcap".
     * The global file ID has the form "volumeUuid:number".
     */
    static public long extractFileIdFromXcap(XCap xcap) {
        String fileId = xcap.getFileId();
        int start = fileId.indexOf(':');
        return Long.parseLong(fileId.substring(start + 1));
    }

    /** Returns the unmodified global file ID ("volumeUuid:number") from "xcap". */
    static public String extractGlobalFileIdFromXcap(XCap xcap) {
        return xcap.getFileId();
    }

    /**
     * Returns the parent directory of "path" ("/" for the root itself and for
     * first-level entries). "path" must not have a trailing slash.
     */
    static public String resolveParentDirectory(String path) {
        path = setLeadingSlashIfMissing(path);
        int lastSlash = path.lastIndexOf('/');
        if (path.equals("/") || lastSlash == 0) {
            return "/";
        } else {
            // We don't allow "path" to have a trailing "/".
            assert (lastSlash != path.length() - 1);
            return path.substring(0, lastSlash);
        }
    }

    /** Prepends "/" to "path" if it does not already start with one. */
    private static String setLeadingSlashIfMissing(String path) {
        if (!path.startsWith("/")) {
            path = "/" + path;
        }
        return path;
    }

    /**
     * Returns the number of objects (stripes) a file of the size given by
     * "fileAttr" occupies under "stripingPolicy".
     */
    static protected long getNumObjects(UserCredentials userCredentials, Stat fileAttr,
            StripingPolicy stripingPolicy) throws IOException, AddressToUUIDNotFoundException,
            PosixErrorException {
        long fileSize = fileAttr.getSize();
        if (fileSize > 0) {
            // Stripe size appears to be stored in kB (hence the * 1024).
            int stripeSize = stripingPolicy.getStripeSize() * 1024;
            // Ceiling division: count full and partial objects.
            return ((fileSize - 1) / stripeSize) + 1;
        } else {
            return 0;
        }
    }

    /**
     * Returns the last path component of "path" ("/" for the root itself).
     * "path" must not have a trailing slash.
     */
    static public String getBasename(String path) {
        path = setLeadingSlashIfMissing(path);
        int lastSlash = path.lastIndexOf('/');
        if (path.equals("/")) {
            return path;
        } else {
            // We don't allow path to have a trailing "/".
            assert (lastSlash != path.length() - 1);
            return path.substring(lastSlash + 1);
        }
    }

    /**
     * Concatenates "directory" and "file" into one path, resolving "." and ".." components of "file".
     *
     * NOTE(review): for a first-level directory (e.g. "/foo") with file "..",
     * this returns "" rather than "/" - confirm whether that is intended.
     */
    static public String concatenatePath(String directory, String file) {
        directory = setLeadingSlashIfMissing(directory);
        // Handle "." and "..".
        if (file.equals(".")) {
            return directory;
        }
        if (file.equals("..")) {
            if (directory.equals("/")) {
                return "/";
            }
            return directory.substring(0, directory.lastIndexOf('/'));
        }
        if (directory.endsWith("/")) {
            return directory + file;
        }
        return directory + "/" + file;
    }

    /**
     * Compares newResponse with currentResponse. Returns 0 iff newResponse == currentResponse; 1 iff
     * newResponse > currentResponse; -1 iff newResponse < currentResponse. (The previous javadoc had
     * the 1/-1 cases inverted relative to the implementation.) Responses are ordered first by
     * truncate epoch, then by size in bytes; null counts as smallest.
     *
     * @param newResponse
     *
     * @param currentResponse
     *
     * @return {1, 0, -1}
     */
    static protected int compareOSDWriteResponses(OSDWriteResponse newResponse,
            OSDWriteResponse currentResponse) {
        if (newResponse == null && currentResponse == null) {
            return 0;
        }
        // newResponse > currentResponse
        if (newResponse != null && currentResponse == null) {
            return 1;
        }
        // newResponse < currentResponse
        if (newResponse == null && currentResponse != null) {
            return -1;
        }
        // newResponse > currentResponse
        if (newResponse.getTruncateEpoch() > currentResponse.getTruncateEpoch()
                || (newResponse.getTruncateEpoch() == currentResponse.getTruncateEpoch() && newResponse
                        .getSizeInBytes() > currentResponse.getSizeInBytes())) {
            return 1;
        }
        // newResponse < currentResponse
        if (newResponse.getTruncateEpoch() < currentResponse.getTruncateEpoch()
                || (newResponse.getTruncateEpoch() == currentResponse.getTruncateEpoch() && newResponse
                        .getSizeInBytes() < currentResponse.getSizeInBytes())) {
            return -1;
        }
        // newResponse == currentResponse
        return 0;
    }

    /**
     * Compares "lock1" with "lock2". Returns true if lock1 equals lock2, false otherwise. They are
     * equal iff clientUuid, clientPid, offset and length are equal.
     *
     * @param lock1
     * @param lock2
     * @return true iff lock1 equals lock2
     */
    protected static boolean checkIfLocksAreEqual(Lock lock1, Lock lock2) {
        return lock1.getClientUuid().equals(lock2.getClientUuid())
                && lock1.getClientPid() == lock2.getClientPid() && lock1.getOffset() == lock2.getOffset()
                && lock1.getLength() == lock2.getLength();
    }

    /**
     * Returns true if the byte ranges of "lock1" and "lock2" overlap and the locks conflict.
     * A length of 0 means "locked until the end of the file".
     *
     * NOTE(review): in the two till-EOF branches below a conflict is reported without consulting
     * the locks' exclusive flags, unlike the bounded-range case - confirm that two shared
     * till-EOF locks are really meant to conflict.
     */
    protected static boolean checkIfLocksDoConflict(Lock lock1, Lock lock2) {
        // 0 means to lock till the end of the file.
        long lock1End = lock1.getLength() == 0 ? 0 : lock1.getLength() + lock1.getOffset();
        long lock2End = lock2.getLength() == 0 ? 0 : lock2.getLength() + lock2.getOffset();
        // Check for overlaps
        if (lock1End == 0) {
            if (lock2End >= lock1.getOffset() || lock2End == 0) {
                return true;
            }
        }
        if (lock2End == 0) {
            if (lock1End >= lock2.getOffset() || lock1End == 0) {
                return true;
            }
        }
        // Overlapping?
        if (!(lock1End < lock2.getOffset() || lock2End < lock1.getOffset())) {
            // Does overlap! Check for conflicting modes.
            return (lock1.getExclusive() || lock2.getExclusive());
        }
        return false;
    }

    /**
     * Renders "policies" as a comma-separated list of their numeric values.
     */
    public static String policiesToString(OSDSelectionPolicyType[] policies) {
        // StringBuilder instead of the (synchronized) StringBuffer - this is
        // purely local, so no synchronization is needed.
        StringBuilder policiesSB = new StringBuilder();
        boolean firstEntry = true;
        for (OSDSelectionPolicyType policy : policies) {
            if (firstEntry) {
                firstEntry = false;
            } else {
                policiesSB.append(",");
            }
            policiesSB.append(String.valueOf(policy.getNumber()));
        }
        return policiesSB.toString();
    }

    /**
     * Convert the given flags to their corresponding bit patterns and combine them by an or.
     *
     * @param flags
     *            Variable number of SYSTEM_V_FCNTL flags
     * @return bit pattern as an integer of the or'ed flags
     */
    public static int flagsToInt(SYSTEM_V_FCNTL... flags) {
        int flagsInt = 0;
        for (SYSTEM_V_FCNTL flag : flags) {
            flagsInt |= flag.getNumber();
        }
        return flagsInt;
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntries;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.listxattrResponse;
/**
 *
 * Represents an entry in the metadatacache: the cached Stat, directory
 * entries and xattr list of one path, each with its own timeout value
 * plus an overall entry timeout.
 */
public class MetadataCacheEntry {
    // Timeout (in seconds, per the "S" suffix) for the cached Stat object.
    private long statTimeoutS;
    // Timeout (in seconds) for the cached directory entries.
    private long dirEntriesTimeoutS;
    // Timeout (in seconds) for the cached xattr list.
    private long xattrTimeoutS;
    // Overall timeout (in seconds) of this cache entry.
    private long timeoutS;
    // Cached stat information; null until set.
    private Stat stat;
    // Cached readdir result; null until set.
    private DirectoryEntries dirEntries;
    // Path this entry belongs to.
    private String path;
    // Cached listxattr response; null until set.
    private listxattrResponse xattrs;
    /** Returns the cached Stat object, or null if none was set. */
    protected Stat getStat() {
        return stat;
    }
    /** Returns the timeout (seconds) of the cached Stat. */
    protected long getStatTimeoutS() {
        return statTimeoutS;
    }
    /** Sets the timeout (seconds) of the cached Stat. */
    protected void setStatTimeoutS(long timeout) {
        this.statTimeoutS = timeout;
    }
    /** Returns the overall timeout (seconds) of this entry. */
    protected long getTimeoutS() {
        return timeoutS;
    }
    /** Sets the overall timeout (seconds) of this entry. */
    protected void setTimeoutS(long timeout) {
        this.timeoutS = timeout;
    }
    /** Stores "stat" as the cached Stat object. */
    protected void setStat(Stat stat) {
        this.stat = stat;
    }
    /** Returns the cached directory entries, or null if none were set. */
    protected DirectoryEntries getDirectoryEntries() {
        return dirEntries;
    }
    /** Stores "dirEntries" as the cached readdir result. */
    protected void setDirectoryEntries(DirectoryEntries dirEntries) {
        this.dirEntries = dirEntries;
    }
    /** Returns the timeout (seconds) of the cached directory entries. */
    protected long getDirectoryEntriesTimeoutS() {
        return dirEntriesTimeoutS;
    }
    /** Sets the timeout (seconds) of the cached directory entries. */
    protected void setDirectoryEntriesTimeoutS(long timeout) {
        this.dirEntriesTimeoutS = timeout;
    }
    /** Sets the path this entry belongs to. */
    protected void setPath(String path) {
        this.path = path;
    }
    /** Returns the path this entry belongs to. */
    protected String getPath() {
        return path;
    }
    /** Returns the cached listxattr response, or null if none was set. */
    protected listxattrResponse getXattrs() {
        return xattrs;
    }
    /** Stores "xattrs" as the cached listxattr response. */
    protected void setXattrs(listxattrResponse xattrs) {
        this.xattrs = xattrs;
    }
    /** Returns the timeout (seconds) of the cached xattr list. */
    protected long getXattrTimeoutS() {
        return xattrTimeoutS;
    }
    /** Sets the timeout (seconds) of the cached xattr list. */
    protected void setXattrTimeoutS(long timeout) {
        this.xattrTimeoutS = timeout;
    }
}

View File

@@ -0,0 +1,272 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.foundation.SSLOptions;
import org.xtreemfs.foundation.VersionManagement;
/**
 * Represents all possible options for libxtreemfs.
 */
public class Options {
    /** Kind of XtreemFS service a URL refers to. */
    enum XtreemFSServiceType {
        kDIR, kMRC
    };
    // Optimizations.
    /**
     * Maximum number of entries of the StatCache. Default: 100000
     */
    private int metadataCacheSize = 100000;
    /**
     * Time to live for MetadataCache entries. Default: 120
     */
    private final long metadataCacheTTLs = 120;
    /**
     * Maximum number of pending bytes (of async writes) per file. Default: 0 (disabled).
     * TODO: Reenable async writes when retry support is completed.
     */
    private int maxWriteahead = 0;
    /**
     * Maximum number of pending async write requests per file. Default: 10
     */
    private final int maxWriteaheadRequests = 10;
    /**
     * Number of retrieved entries per readdir request. Default: 1024
     */
    private int readdirChunkSize = 1024;
    // Error Handling options.
    /**
     * How often shall a failed operation get retried? Default: 40
     */
    private int maxTries = 40;
    /**
     * How often shall a failed read operation get retried? Default: 40
     */
    private int maxReadTries = 40;
    /**
     * How often shall a failed write operation get retried? Default: 40
     */
    private final int maxWriteTries = 40;
    /**
     * How often shall a view be tried to renewed? Default: 5
     */
    private int maxViewRenewals = 5;
    /**
     * How long to wait after a failed request? Default: 15
     */
    private final int retryDelay_s = 15;
    /**
     * Stops retrying to execute a synchronous request if this signal was send to the thread responsible for
     * the execution of the request. Default: 0
     */
    private final int interruptSignal = 0;
    /**
     * Maximum time until a connection attempt will be aborted. Default: 60
     */
    private final int connectTimeout_s = 60;
    /**
     * Maximum time until a request will be aborted and the response returned. Default: 30
     * (the previous javadoc omitted the value)
     */
    private final int requestTimeout_s = 30;
    /**
     * The RPC Client closes connections after "linger_timeout_s" time of inactivity. Default: 600
     */
    private final int lingerTimeout_s = 600;
    public int getMetadataCacheSize() {
        return metadataCacheSize;
    }
    public void setMetadataCacheSize(int metadataCacheSize) {
        this.metadataCacheSize = metadataCacheSize;
    }
    public long getMetadataCacheTTLs() {
        return metadataCacheTTLs;
    }
    public int getInterruptSignal() {
        return interruptSignal;
    }
    public int getConnectTimeout_s() {
        return connectTimeout_s;
    }
    public int getRequestTimeout_s() {
        return requestTimeout_s;
    }
    public int getLingerTimeout_s() {
        return lingerTimeout_s;
    }
    // SSL options.
    // NOTE(review): "sslPKCS2Path" is presumably a typo for sslPKCS12Path
    // (cf. sslPKCS12Pass below) - confirm before renaming.
    private final String sslPemCertPath = "";
    private final String sslPemPath = "";
    private final String sslPemKeyPass = "";
    private final String sslPKCS2Path = "";
    private final String sslPKCS12Pass = "";
    // Grid Support options.
    /**
     * True, if the XtreemFS Grid-SSL Mode (only SSL handshake, no encryption of data itself) shall be used.
     * Default: false
     */
    private final boolean gridSSL = false;
    /**
     * True if the Globus user mapping shall be used. Default: false
     * */
    private final boolean gridAuthModeGlobus = false;
    /**
     * True if the Unicore user mapping shall be used. Default: false
     * */
    private final boolean gridAuthModeUnicore = false;
    /**
     * Location of the gridmap file. Default: ""
     */
    private final String gridGridmapLocation = "";
    /**
     * Default Location of the Globus gridmap file. Default: "/etc/grid-security/grid-mapfile"
     */
    private final String gridGridmapLocationDefaultGlobus = "/etc/grid-security/grid-mapfile";
    /**
     * Default Location of the Unicore gridmap file. Default: "/etc/grid-security/d-grid_uudb"
     */
    private final String gridGridmapLocationDefaultUnicore = "/etc/grid-security/d-grid_uudb";
    /**
     * Periodic interval after which the gridmap file will be reloaded. Default: 60
     */
    private final int gridGridmapReloadInterval_m = 60; // 60 minutes = 1 hour
    // Advanced XtreemFS options
    /**
     * Interval for periodic file size updates in seconds. Default: 60
     */
    private int periodicFileSizeUpdatesIntervalS = 60;
    /**
     * Interval for periodic xcap renewal in seconds. Default: 60
     */
    private final int periodicXcapRenewalIntervalS = 60;
    protected int getPeriodicXcapRenewalIntervalS() {
        return periodicXcapRenewalIntervalS;
    }
    protected int getPeriodicFileSizeUpdatesIntervalS() {
        return periodicFileSizeUpdatesIntervalS;
    }
    /**
     * Returns the version string and prepends "component".
     */
    String showVersion(String component) {
        // NOTE(review): no separator is inserted between "component" and the
        // version string - confirm callers pass a trailing space if desired.
        return component + VersionManagement.RELEASE_VERSION;
    }
    /**
     * Return the version.
     *
     */
    String getVersion() {
        return VersionManagement.RELEASE_VERSION;
    }
    /**
     * Creates a new SSLOptions object based on the value of the members: - sslPem_path - sslPemCertPath -
     * sslPemKeyPass - sslPkcs12Path - sslPkcs12Pass - gridSsl || protocol.
     *
     * @return currently always null; the construction of the SSLOptions object is not implemented
     *         yet (see TODO below).
     */
    public SSLOptions generateSSLOptions() {
        SSLOptions sslOptions = null;
        if (sslEnabled()) {
            // TODO: Find out how to create SSLOptions object.
            // sslOptions = new SSLOptions(new FileInputStream(new File(sslPemPath)),
            // sslPemKeyPass, sslPemCertPath,
            // new FileInputStream(new File(sslPKCS2Path)),
            // sslPKCS12Pass, new String(), gridSSL || protocol == Schemes.SCHEME_PBRPCG );
        }
        // Return the local variable instead of a hard-coded null (the old
        // code ignored sslOptions entirely), so the TODO above only has to
        // fill in the assignment once it is implemented.
        return sslOptions;
    }
    /** Extract volume name and dir service address from dir_volume_url. */
    protected void parseURL(XtreemFSServiceType service_type) {
        // TODO: Implement!
    }
    /** Returns true if any SSL certificate path (PEM or PKCS#12) is configured. */
    public boolean sslEnabled() {
        return !sslPemCertPath.isEmpty() || !sslPKCS2Path.isEmpty();
    }
    public int getMaxTries() {
        return maxTries;
    }
    public void setMaxTries(int maxTries) {
        this.maxTries = maxTries;
    }
    protected int getMaxWriteTries() {
        return maxWriteTries;
    }
    public int getMaxViewRenewals() {
        return maxViewRenewals;
    }
    public void setMaxViewRenewals(int maxViewRenewals) {
        this.maxViewRenewals = maxViewRenewals;
    }
    public int getRetryDelay_s() {
        return retryDelay_s;
    }
    public int getMaxWriteahead() {
        return maxWriteahead;
    }
    protected int getMaxWriteaheadRequests() {
        return maxWriteaheadRequests;
    }
    public int getReaddirChunkSize() {
        return readdirChunkSize;
    }
    public void setReaddirChunkSize(int readdirChunkSize) {
        this.readdirChunkSize = readdirChunkSize;
    }
    public void setPeriodicFileSizeUpdatesIntervalS(int periodicFileSizeUpdatesIntervalS) {
        this.periodicFileSizeUpdatesIntervalS = periodicFileSizeUpdatesIntervalS;
    }
    public void setMaxWriteAhead(int maxWriteAhead) {
        this.maxWriteahead = maxWriteAhead;
    }
    public void setMaxReadTries(int maxReadTries) {
        this.maxReadTries = maxReadTries;
    }
    public int getMaxReadTries() {
        return maxReadTries;
    }
}

View File

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.Map.Entry;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
/**
 * Updates periodically the fileSize at the MRC.
 */
public class PeriodicFileSizeUpdateThread extends Thread {
    /** Volume whose open-file table is scanned for pending file size updates. */
    private VolumeImplementation volume = null;

    /**
     * @param volume
     *            volume whose open files are periodically written back.
     * @param startAsDaemon
     *            if true, this thread is marked as a daemon thread.
     */
    public PeriodicFileSizeUpdateThread(VolumeImplementation volume, boolean startAsDaemon) {
        this.volume = volume;
        setDaemon(startAsDaemon);
    }

    /*
     * (non-Javadoc)
     *
     * @see java.lang.Thread#run()
     */
    @Override
    public void run() {
        while (!isInterrupted()) {
            // Send thread to sleep (default 1 minute). Use a long literal to
            // avoid int overflow of "interval * 1000" for large intervals.
            try {
                Thread.sleep(volume.getOptions().getPeriodicFileSizeUpdatesIntervalS() * 1000L);
            } catch (InterruptedException e) {
                break;
            }
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "START openFileTable: Periodic filesize update for %s open files.", volume
                                .getOpenFileTable().size());
            }
            // Iterate over the openFileTable.
            for (Entry<Long, FileInfo> entry : volume.getOpenFileTable().entrySet()) {
                try {
                    entry.getValue().writeBackFileSizeAsync();
                } catch (IOException e) {
                    if (Logging.isDebug()) {
                        // Fixed: the format string was missing the %s
                        // placeholder, so the exception message was dropped.
                        Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                                "PeriodicFileSizeUpdateThread: failed to update filesize. Reason: %s",
                                e.getMessage());
                    }
                }
            }
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "END openFileTable: Periodic filesize update for %s open files.", volume
                                .getOpenFileTable().size());
            }
        }
    }
}

View File

@@ -0,0 +1,69 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.util.Map.Entry;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
/**
 * Renews periodically the XCap of all open files.
 */
public class PeriodicXcapRenewalThread extends Thread {
    /** Volume whose open-file table is scanned for XCaps to renew. */
    private VolumeImplementation volume = null;

    /**
     * @param volume
     *            volume whose open files get their XCaps renewed.
     * @param startAsDaemon
     *            if true, this thread is marked as a daemon thread.
     */
    public PeriodicXcapRenewalThread(VolumeImplementation volume, boolean startAsDaemon) {
        this.volume = volume;
        setDaemon(startAsDaemon);
    }

    /*
     * (non-Javadoc)
     *
     * @see java.lang.Thread#run()
     */
    @Override
    public void run() {
        while (!isInterrupted()) {
            // Send thread to sleep (default 1 minute). Catch only
            // InterruptedException (the former broad "catch (Exception)"
            // could also hide programming errors); use a long literal to
            // avoid int overflow for large intervals.
            try {
                Thread.sleep(volume.getOptions().getPeriodicXcapRenewalIntervalS() * 1000L);
            } catch (InterruptedException e) {
                break;
            }
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "START openFileTable: Periodic Xcap renewal for %s open files.", volume
                                .getOpenFileTable().size());
            }
            // Iterate over the openFileTable.
            for (Entry<Long, FileInfo> entry : volume.getOpenFileTable().entrySet()) {
                try {
                    entry.getValue().renewXCapsAsync();
                } catch (AddressToUUIDNotFoundException e) {
                    if (Logging.isDebug()) {
                        // Fixed: the format string was missing the %s
                        // placeholder, so the exception message was dropped.
                        Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                                "PeriodicXCapThread: failed to renew XCap. Reason: %s",
                                e.getMessage());
                    }
                }
            }
            if (Logging.isDebug()) {
                Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, this,
                        "END openFileTable: Periodic Xcap renewal for %s open files.", volume
                                .getOpenFileTable().size());
            }
        }
    }
}

View File

@@ -0,0 +1,314 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.InternalServerErrorException;
import org.xtreemfs.common.libxtreemfs.exceptions.InvalidViewException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.common.libxtreemfs.exceptions.XtreemFSException;
import org.xtreemfs.foundation.buffer.BufferPool;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.foundation.logging.Logging;
import org.xtreemfs.foundation.logging.Logging.Category;
import org.xtreemfs.foundation.pbrpc.client.PBRPCException;
import org.xtreemfs.foundation.pbrpc.client.RPCResponse;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.ErrorType;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.POSIXErrno;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.SERVICES;
import com.google.protobuf.Message;
/**
* Helper class provides static methods for all kinds of RPC calls to the servers. Abstracts error
* handling and retrying of failure.
*/
public class RPCCaller {
    /**
     * Interface for syncCall which generates the calls. Will be called for each retry.
     *
     * @param <C>
     *            type of the request object passed to the call.
     * @param <R>
     *            type of the protobuf response message returned by the server.
     */
    protected interface CallGenerator<C, R extends Message> {
        /** Executes one RPC against "server" and returns the pending response. */
        public RPCResponse<R> executeCall(InetSocketAddress server, Auth authHeader,
                UserCredentials userCreds, C input) throws IOException, PosixErrorException;
    }
    /**
     * Convenience overload of syncCall() without a piggyback buffer: uses
     * options.getMaxTries() retries and no delay before the next attempt.
     */
    protected static <C, R extends Message> R syncCall(SERVICES service, UserCredentials userCreds,
            Auth auth, Options options, UUIDResolver uuidResolver, UUIDIterator it,
            boolean uuidIteratorHasAddresses, C callRequest, CallGenerator<C, R> callGen) throws IOException,
            PosixErrorException, InternalServerErrorException, AddressToUUIDNotFoundException {
        return syncCall(service, userCreds, auth, options, uuidResolver, it, uuidIteratorHasAddresses, false,
                options.getMaxTries(), callRequest, null, callGen);
    }
    /**
     * Overload of syncCall() with explicit "delayNextTry" and "maxRetries";
     * no piggyback buffer is used.
     */
    protected static <C, R extends Message> R
            syncCall(SERVICES service, UserCredentials userCreds, Auth auth, Options options,
                    UUIDResolver uuidResolver, UUIDIterator it, boolean uuidIteratorHasAddresses,
                    boolean delayNextTry, int maxRetries, C callRequest, CallGenerator<C, R> callGen) throws IOException,
                    PosixErrorException, InternalServerErrorException, AddressToUUIDNotFoundException {
        return syncCall(service, userCreds, auth, options, uuidResolver, it, uuidIteratorHasAddresses,
                delayNextTry, maxRetries, callRequest, null, callGen);
    }
    /**
     * Overload of syncCall() with a piggyback buffer "buf" which is filled with
     * data returned in the RPCResponse (used by read requests); uses
     * options.getMaxTries() retries and no delay before the next attempt.
     */
    protected static <C, R extends Message> R syncCall(SERVICES service, UserCredentials userCreds,
            Auth auth, Options options, UUIDResolver uuidResolver, UUIDIterator it,
            boolean uuidIteratorHasAddresses, C callRequest, ReusableBuffer buf, CallGenerator<C, R> callGen)
            throws IOException, PosixErrorException, InternalServerErrorException,
            AddressToUUIDNotFoundException {
        return syncCall(service, userCreds, auth, options, uuidResolver, it, uuidIteratorHasAddresses, false,
                options.getMaxTries(), callRequest, buf, callGen);
    }
    /**
     * Executes the RPC generated by "callGen" synchronously and retries it up to "maxRetries"
     * times (0 = retry forever). Recoverable errors (REDIRECT, IO_ERROR, INTERNAL_SERVER_ERROR)
     * mark the current UUID of "it" as failed and retry against the next one; all other errors
     * are translated into exceptions by handleErrorAfterMaxTriesExceeded().
     *
     * @param service
     *            Type of the contacted service; used to pick the default port.
     * @param userCreds
     *            Name and groups of the user, sent with every request.
     * @param auth
     *            Authentication data, e.g. of type AUTH_PASSWORD.
     * @param options
     *            Client options (retry delay, interrupt signal, ...).
     * @param uuidResolver
     *            Used to map a UUID to an address when the iterator stores real UUIDs.
     * @param it
     *            Iterator over the UUIDs (or addresses) of the service replicas.
     * @param uuidIteratorHasAddresses
     *            True if "it" already contains "host:port" strings instead of UUIDs.
     * @param delayNextTry
     *            If true, the last retry is delayed as well.
     * @param maxRetries
     *            Maximum number of attempts; 0 means retry infinitely.
     * @param callRequest
     *            The request message to send.
     * @param buffer
     *            If not null, data piggybacked in the RPC response (e.g. read data) is copied
     *            into this buffer.
     * @param callGen
     *            Generates the actual RPC for a given server address.
     * @return the parsed response message, or null if no response could be obtained.
     */
    protected static <C, R extends Message> R syncCall(SERVICES service, UserCredentials userCreds,
            Auth auth, Options options, UUIDResolver uuidResolver, UUIDIterator it,
            boolean uuidIteratorHasAddresses, boolean delayNextTry, int maxRetries, C callRequest,
            ReusableBuffer buffer, CallGenerator<C, R> callGen) throws PosixErrorException, IOException,
            InternalServerErrorException, AddressToUUIDNotFoundException {
        int maxTries = maxRetries;
        int attempt = 0;
        R response = null;
        try {
            // maxTries == 0 means "retry forever".
            while (++attempt <= maxTries || maxTries == 0) {
                // Retry only if it is a recoverable error (REDIRECT, IO_ERROR, INTERNAL_SERVER_ERROR).
                boolean retry = false;
                IOException responseError = null;
                RPCResponse<R> r = null;
                try {
                    // Create an InetSocketAddress depending on the UUIDIterator and
                    // the kind of service.
                    InetSocketAddress server;
                    if (uuidIteratorHasAddresses) {
                        server = getInetSocketAddressFromAddress(it.getUUID(), service);
                    } else { // UUIDIterator has real UUIDs, not just address Strings.
                        String address = uuidResolver.uuidToAddress(it.getUUID());
                        server = getInetSocketAddressFromAddress(address, service);
                    }
                    r = callGen.executeCall(server, auth, userCreds, callRequest);
                    response = r.get();
                    // If the buffer is not null it should be filled with data
                    // piggybacked in the RPCResponse.
                    // This is used by the read request.
                    if (r.getData() != null) {
                        if (buffer != null) {
                            buffer.put(r.getData());
                        }
                        BufferPool.free(r.getData());
                    }
                } catch (PBRPCException pbe) {
                    responseError = pbe;
                    // Handle the special redirect error: retry immediately against the new master.
                    if (pbe.getErrorType().equals(ErrorType.REDIRECT)) {
                        assert (pbe.getRedirectToServerUUID() != null);
                        // Log redirect.
                        if (Logging.isInfo()) {
                            String error;
                            if (uuidIteratorHasAddresses) {
                                error =
                                        "The server " + it.getUUID() + " redirected to the current master: "
                                                + pbe.getRedirectToServerUUID() + " at attempt: " + attempt;
                            } else {
                                error =
                                        "The server with UUID " + it.getUUID()
                                                + " redirected to the current master: "
                                                + pbe.getRedirectToServerUUID() + " at attempt: " + attempt;
                            }
                            Logging.logMessage(Logging.LEVEL_INFO, Category.misc, pbe, error);
                        }
                        if (maxTries != 0 && attempt == maxTries) {
                            // This was the last retry, but we give it another chance.
                            maxTries++;
                        }
                        // Do a fast retry and do not delay until next attempt.
                        it.markUUIDAsFailed(it.getUUID());
                        continue;
                    }
                    if (pbe.getErrorType().equals(ErrorType.IO_ERROR)
                            || pbe.getErrorType().equals(ErrorType.INTERNAL_SERVER_ERROR)) {
                        // Mark the current UUID as failed and get the next one.
                        it.markUUIDAsFailed(it.getUUID());
                        retry = true;
                    }
                } catch (IOException ioe) {
                    responseError = ioe;
                    // Mark the current UUID as failed and get the next one.
                    it.markUUIDAsFailed(it.getUUID());
                    retry = true;
                } catch (InterruptedException ie) {
                    // NOTE(review): interruptSignal == 0 apparently means "no interrupt signal
                    // configured", in which case the request is silently aborted - confirm.
                    if (options.getInterruptSignal() == 0) {
                        if (Logging.isDebug()) {
                            Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, ie,
                                    "Caught interrupt, aborting sync request");
                        }
                        break;
                    }
                    throw new IOException();
                } finally {
                    // Always release the response buffers, also on the error paths.
                    if (r != null) {
                        r.freeBuffers();
                    }
                }
                if (responseError != null) {
                    // Log only the first failed attempt to avoid flooding the log.
                    if (attempt == 1 && maxTries != 1) {
                        String retriesLeft = (maxTries == 0) ? ("infinite") : (String.valueOf(maxTries - attempt));
                        Logging.logMessage(Logging.LEVEL_ERROR, Category.misc, responseError,
                                "Got no response from %s, " + "retrying (%s attemps left, waiting at least %s seconds"
                                        + " between two attemps) Error was: %s", it.getUUID(), retriesLeft,
                                options.getRetryDelay_s(), responseError.getMessage());
                        if (Logging.isDebug()) {
                            Logging.logError(Logging.LEVEL_DEBUG, null, responseError);
                        }
                    }
                    // Retry (and delay)?
                    // NOTE(review): '&&' binds tighter than '||', so the (attempt == maxTries &&
                    // delayNextTry) branch is taken even when 'retry' is false - confirm intended.
                    if (retry &&
                    // Retry (and delay) only if at least one retry is left
                            (attempt < maxTries || maxTries == 0)
                            // or this last retry should be delayed
                            || (attempt == maxTries && delayNextTry)) {
                        waitDelay(options.getRetryDelay_s());
                        continue;
                    } else {
                        throw responseError;
                    }
                }
                return response;
            }
        } catch (PBRPCException e) {
            // Max attempts reached or non-IO error seen. Throw an exception.
            handleErrorAfterMaxTriesExceeded(e, it);
        }
        return null;
    }
/**
* Blocks the thread for delay_s seconds and throws an exception if interrupted.
*
* @param delay_s
* @throws IOException
*/
static void waitDelay(long delay_s) throws IOException {
try {
Thread.sleep(delay_s * 1000);
} catch (InterruptedException e) {
String msg = "Caught interrupt while waiting for the next attempt, aborting sync request";
if (Logging.isInfo()) {
Logging.logMessage(Logging.LEVEL_DEBUG, Category.misc, e, msg);
}
throw new IOException(msg);
}
}
/**
* Determines what to throw when the maximum number of retries is reached and there is still no valid
* answer.
*
* @param e
* @param it
*/
private static void handleErrorAfterMaxTriesExceeded(PBRPCException e, UUIDIterator it) throws PosixErrorException,
IOException, InternalServerErrorException, InvalidViewException, XtreemFSException {
// By default all errors are logged as errors.
int logLevel = Logging.LEVEL_INFO;
String errorMsg = "";
switch (e.getErrorType()) {
case ERRNO:
// Posix error are usally not logged as errors.
if (e.getPOSIXErrno().equals(POSIXErrno.POSIX_ERROR_ENOENT)) {
logLevel = Logging.LEVEL_DEBUG;
}
errorMsg =
"The server " + it.getUUID() + " denied the requested operation. " + "Error value: "
+ e.getErrorType().name() + " Error message: " + e.getErrorMessage();
Logging.logMessage(logLevel, Category.misc, e, errorMsg);
throw new PosixErrorException(e.getPOSIXErrno(), errorMsg);
case IO_ERROR:
Logging.logMessage(logLevel, Category.misc, e, "The client encountered a communication "
+ "error sending a request to the server %s Error: %s", it.getUUID(), e.getErrorMessage());
throw new IOException(e.getErrorMessage());
case INTERNAL_SERVER_ERROR:
Logging.logMessage(logLevel, Category.misc, e, "The server %s returned an internal server"
+ "error: %s", it.getUUID(), e.getErrorMessage());
throw new InternalServerErrorException(errorMsg);
case REDIRECT:
throw new XtreemFSException("This error (A REDIRECT error was not handled "
+ "and retried but thrown instead) should never happen. Report this");
case INVALID_VIEW:
Logging.logMessage(logLevel, Category.replication, e,
"The server %s denied the requested operation because the clients view is outdated. Error: %s",
it.getUUID(), e.getErrorMessage());
throw new InvalidViewException(e.getErrorMessage());
default:
errorMsg =
"The server " + it.getUUID() + "returned an error: " + e.getErrorType().name()
+ " Error: " + e.getErrorMessage();
Logging.logMessage(logLevel, Category.misc, e, errorMsg);
throw new XtreemFSException(errorMsg);
} // end of switch
}
// private static void handlePBException(PBRPCException e) throws IOException, PosixErrorException {
// int loglevel = Logging.LEVEL_INFO;
//
// switch (e.getErrorType()) {
//
// case ErrorType.ERRNO:
// // Posix errors are usually not logged as errors.
// if (e.getPOSIXErrno().equals(POSIXErrno.POSIX_ERROR_ENOENT)) {
// loglevel = Logging.LEVEL_DEBUG;
// }
// Logging.logMessage(loglevel, Category.misc, e, "The server %s (" + , args)
//
// default:
// return;
// }
// }
/**
* Create an InetSocketAddress depending on the address and the type of service object is. If address does
* not contain a port a default port depending on the client object is used.
*
* @param address
* The address.
* @param service
* The service used to determine which default port should used when address does not contain a
* port.
* @return
*/
protected static InetSocketAddress getInetSocketAddressFromAddress(String address, SERVICES service) {
if (SERVICES.DIR.equals(service)) {
return Helper.stringToInetSocketAddress(address,
GlobalTypes.PORTS.DIR_PBRPC_PORT_DEFAULT.getNumber());
}
if (SERVICES.MRC.equals(service)) {
return Helper.stringToInetSocketAddress(address,
GlobalTypes.PORTS.MRC_PBRPC_PORT_DEFAULT.getNumber());
}
if (SERVICES.OSD.equals(service)) {
return Helper.stringToInetSocketAddress(address,
GlobalTypes.PORTS.OSD_PBRPC_PORT_DEFAULT.getNumber());
}
return null;
}
}

View File

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
/**
 * Represents a single read operation produced by the StripeTranslator.
 *
 * A read request that spans several stripes is translated into a list of ReadOperations, one per
 * object that has to be fetched. Instances are immutable.
 */
public class ReadOperation {

    // Number of the object (stripe) this operation reads from.
    private final long objNumber;

    // Index of the OSD (within the striping width) that stores the object.
    private final int  osdOffset;

    // Number of bytes to read.
    private final int  reqSize;

    // Offset within the object at which reading starts.
    private final int  reqOffset;

    /**
     * The position where the result of this request should be put into the global Buffer.
     */
    private final int  bufferStart;

    protected ReadOperation(long objNumber, int osdOffset, int reqSize, int reqOffset, int bufferStart) {
        this.objNumber = objNumber;
        this.osdOffset = osdOffset;
        this.reqSize = reqSize;
        this.reqOffset = reqOffset;
        this.bufferStart = bufferStart;
    }

    protected long getObjNumber() {
        return objNumber;
    }

    public int getOsdOffset() {
        return osdOffset;
    }

    protected int getReqSize() {
        return reqSize;
    }

    protected int getReqOffset() {
        return reqOffset;
    }

    protected int getBufferStart() {
        return bufferStart;
    }
}

View File

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.util.Vector;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
public interface StripeTranslator {
    /**
     * Translates a read request of "size" bytes starting at "offset" into per-object
     * {@link ReadOperation}s according to "policy" and appends them to "operations".
     *
     * @param size
     *            Number of bytes to read.
     * @param offset
     *            Start offset of the read in the file.
     * @param policy
     *            Striping policy (stripe size and width) of the file.
     * @param operations
     *            Output list the generated {@link ReadOperation}s are appended to.
     */
    public abstract void translateReadRequest(int size, long offset, StripingPolicy policy,
            Vector<ReadOperation> operations);

    /**
     * Translates a write request of "size" bytes starting at "offset" into per-object
     * {@link WriteOperation}s according to "policy" and appends them to "operations".
     *
     * @param size
     *            Number of bytes to write.
     * @param offset
     *            Start offset of the write in the file.
     * @param policy
     *            Striping policy (stripe size and width) of the file.
     * @param buf
     *            Buffer holding the data to write; each operation gets a view of its slice.
     * @param operations
     *            Output list the generated {@link WriteOperation}s are appended to.
     */
    public abstract void translateWriteRequest(int size, long offset, StripingPolicy policy,
            ReusableBuffer buf, Vector<WriteOperation> operations);
}

View File

@@ -0,0 +1,53 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.util.Vector;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.StripingPolicy;
/**
 * Translates read and write requests for files striped with the RAID0 policy: the file is split
 * into objects of the configured stripe size, distributed round-robin over "width" OSDs.
 */
public class StripeTranslatorRaid0 implements StripeTranslator {

    public void translateWriteRequest(int size, long offset, StripingPolicy policy, ReusableBuffer buf,
            Vector<WriteOperation> operations) {
        // The stripe size is stored in kB; convert it to bytes. The width is the number of OSDs.
        final int stripeSizeInBytes = policy.getStripeSize() * 1024;
        final int osdCount = policy.getWidth();

        for (int bytesDone = 0; bytesDone < size;) {
            final long objectNumber = (bytesDone + offset) / stripeSizeInBytes;
            final int osdIndex = (int) (objectNumber % osdCount);
            final int offsetInObject = (int) ((bytesDone + offset) % stripeSizeInBytes);
            // Either the remainder of the request or the remainder of the current stripe.
            final int chunkSize = Math.min(size - bytesDone, stripeSizeInBytes - offsetInObject);

            // Each operation writes a view of its slice of the original buffer.
            final ReusableBuffer sliceBuffer = buf.createViewBuffer();
            sliceBuffer.range(bytesDone, chunkSize);
            operations.add(new WriteOperation(objectNumber, osdIndex, chunkSize, offsetInObject, sliceBuffer));

            bytesDone += chunkSize;
        }
    }

    public void translateReadRequest(int size, long offset, StripingPolicy policy,
            Vector<ReadOperation> operations) {
        // The stripe size is stored in kB; convert it to bytes. The width is the number of OSDs.
        final int stripeSizeInBytes = policy.getStripeSize() * 1024;
        final int osdCount = policy.getWidth();

        for (int bytesDone = 0; bytesDone < size;) {
            final long objectNumber = (bytesDone + offset) / stripeSizeInBytes;
            final int osdIndex = (int) (objectNumber % osdCount);
            final int offsetInObject = (int) ((bytesDone + offset) % stripeSizeInBytes);
            // Either the remainder of the request or the remainder of the current stripe.
            final int chunkSize = Math.min(size - bytesDone, stripeSizeInBytes - offsetInObject);

            operations.add(new ReadOperation(objectNumber, osdIndex, chunkSize, offsetInObject, bytesDone));

            bytesDone += chunkSize;
        }
    }
}

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
/**
 * Simple immutable pair of two values (Java has no built-in tuple type).
 *
 * @param <T>
 *            Type of the first element.
 * @param <V>
 *            Type of the second element.
 */
public class Tupel<T, V> {

    private final T object1;

    private final V object2;

    protected Tupel(T object1, V object2) {
        this.object1 = object1;
        this.object2 = object2;
    }

    /** Returns the first element of the pair. */
    protected T getFirst() {
        return object1;
    }

    /** Returns the second element of the pair. */
    protected V getSecond() {
        return object2;
    }
}

View File

@@ -0,0 +1,190 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.util.ArrayList;
import java.util.Collection;
import org.xtreemfs.common.libxtreemfs.exceptions.UUIDIteratorListIsEmpyException;
/**
* Stores a list of all UUIDs of a replicated service and allows to iterate through them.
*
* If an UUID was marked as failed and this is the current UUID, the next call of GetUUID() will return
* another available, not as failed marked, UUID.
*
* If the last UUID in the list is marked as failed, the status of all entries will be reset and the current
* UUID is set to the first in the list.
*
* Additionally, it is allowed to set the current UUID to a specific one, regardless of its current state.
* This is needed in case a service did redirect a request to another UUID.
*/
public class UUIDIterator {

    /** A single UUID (or address) entry together with its failure state. */
    static private class UUIDItem {
        /** The UUID (or address string) represented by this entry. */
        public final String uuid;

        /**
         * Shows whether this UUID has failed
         */
        public boolean      markedAsFailed;

        /**
         * Represents a UUID
         */
        public UUIDItem(String addUUID) {
            markedAsFailed = false;
            uuid = addUUID;
        }
    }

    /** List of all known UUIDs in insertion order. */
    private final ArrayList<UUIDItem> uuids;

    /** Entry returned by getUUID(); null only while the list is empty. */
    private UUIDItem                  currentUUID;

    /**
     * Creates a new instance of UUIDIterator with an empty UUID list.
     */
    public UUIDIterator() {
        uuids = new ArrayList<UUIDIterator.UUIDItem>();
        currentUUID = null;
    }

    /**
     * Creates a new instance of UUIDIterator with an UUID list containing all the UUIDs in the
     * specified collection.
     */
    public UUIDIterator(Collection<String> uuids) {
        this();
        addUUIDs(uuids);
    }

    /**
     * Appends "uuid" to the list of UUIDs. Does not change the current UUID unless the list was
     * empty before (then the new entry becomes the current UUID).
     */
    public synchronized void addUUID(String uuid) {
        UUIDItem entry = new UUIDItem(uuid);
        uuids.add(entry);
        if (uuids.size() == 1) {
            currentUUID = entry;
        }
    }

    /**
     * Appends all the UUIDs in the specified collection to the list of UUIDs. Does not change the
     * current UUID (unless the list was empty before).
     */
    public synchronized void addUUIDs(Collection<String> uuids) {
        for (String uuid : uuids) {
            addUUID(uuid);
        }
    }

    /**
     * Clears the list of UUIDs and resets the current UUID.
     */
    public synchronized void clear() {
        uuids.clear();
        currentUUID = null;
    }

    /**
     * Atomically clears the list and adds "uuid" to avoid an empty list.
     */
    public synchronized void clearAndAddUUID(String uuid) {
        this.clear();
        this.addUUID(uuid);
    }

    /**
     * Atomically clears the list and adds all the UUIDs in the specified collection.
     */
    public synchronized void clearAndAddUUIDs(Collection<String> uuids) {
        this.clear();
        this.addUUIDs(uuids);
    }

    /**
     * Returns the list of UUIDs and their status, e.g. "[ [ uuid1, false ][ uuid2, true ] ]".
     */
    public synchronized String debugString() {
        // A local StringBuilder suffices here: the method is synchronized and the builder
        // never escapes (was StringBuffer, which synchronizes every append needlessly).
        StringBuilder debugStringBuilder = new StringBuilder("[ ");
        for (UUIDItem item : uuids) {
            debugStringBuilder.append("[ " + item.uuid + ", " + item.markedAsFailed + " ]");
        }
        debugStringBuilder.append(" ]");
        return debugStringBuilder.toString();
    }

    /**
     * Get the current UUID (by default the first in the list).
     *
     * @throws UUIDIteratorListIsEmpyException
     *             Thrown if the list of UUIDs is empty.
     **/
    public synchronized String getUUID() throws UUIDIteratorListIsEmpyException {
        if (uuids.isEmpty()) {
            throw new UUIDIteratorListIsEmpyException("getUUID() failed as no current"
                    + " UUID is set. Size of list of UUIDs: " + uuids.size());
        } else {
            // markUUIDAsFailed() always advances (or resets) currentUUID, so the current
            // entry can never be in the failed state here.
            assert (!currentUUID.markedAsFailed);
            return currentUUID.uuid;
        }
    }

    /**
     * Marks "uuid" as failed. Use this function to advance to the next in the list.
     */
    public synchronized void markUUIDAsFailed(String uuid) {
        // Only do something if currentUUID is uuid.
        if (currentUUID != null && currentUUID.uuid.equals(uuid)) {
            currentUUID.markedAsFailed = true;
            int index = uuids.indexOf(currentUUID);
            // If this is the last UUID in the list, reset all entries and start over.
            if (index == (uuids.size() - 1)) {
                for (UUIDItem item : uuids) {
                    item.markedAsFailed = false;
                }
                currentUUID = uuids.get(0);
            } else { // Set currentUUID to the following UUID.
                currentUUID = uuids.get(index + 1);
            }
        }
    }

    /**
     * Sets "uuid" as current UUID. If uuid was not found in the list of UUIDs, it will be added to
     * the UUIDIterator.
     */
    public synchronized void setCurrentUUID(String uuid) {
        // Search "uuid" in "uuids" and set it to the current UUID.
        for (UUIDItem item : uuids) {
            if (item.uuid.equals(uuid)) {
                currentUUID = item;
                return;
            }
        }
        // UUID was not found, add it.
        UUIDItem entry = new UUIDItem(uuid);
        uuids.add(entry);
        currentUUID = entry;
        return;
    }

    /**
     * Get the number of UUIDs in this iterator regardless if they are marked as failed or not.
     *
     * @return int
     *         Number of UUIDs.
     */
    public synchronized int size() {
        return uuids.size();
    }
}

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.VolumeNotFoundException;
public interface UUIDResolver {
    /**
     * Resolves the address (ip-address:port) for a given UUID.
     *
     * @param uuid
     *            UUID of the service to resolve.
     * @return String
     *         The resolved address as String ("ip-address:port").
     *
     * @throws AddressToUUIDNotFoundException
     *             Thrown if no address is known for "uuid".
     */
    public String uuidToAddress(String uuid) throws AddressToUUIDNotFoundException;

    /**
     * Resolves the UUID for a given volume name.
     *
     * @param volumeName
     *            Name of the volume.
     * @return String
     *         UUID of the MRC the volume 'volumeName' is registered.
     *
     * @throws AddressToUUIDNotFoundException
     *             Thrown if the resolved UUID cannot be mapped to an address.
     * @throws VolumeNotFoundException
     *             Thrown if no volume with the name 'volumeName' is registered.
     */
    public String volumeNameToMRCUUID(String volumeName) throws VolumeNotFoundException, AddressToUUIDNotFoundException;

    /**
     * Resolves the list of UUIDs of the MRC replicas and adds them to the uuid_iterator object.
     *
     * @param volumeName
     *            Name of the volume.
     * @param uuidIterator
     *            Iterator the resolved MRC replica UUIDs are added to.
     *
     * @throws VolumeNotFoundException
     *             Thrown if no volume with the name 'volumeName' is registered.
     * @throws AddressToUUIDNotFoundException
     *             Thrown if a resolved UUID cannot be mapped to an address.
     */
    public void volumeNameToMRCUUID(String volumeName, UUIDIterator uuidIterator)
            throws VolumeNotFoundException, AddressToUUIDNotFoundException;
}

View File

@@ -0,0 +1,788 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.xtreemfs.common.ReplicaUpdatePolicies;
import org.xtreemfs.common.libxtreemfs.exceptions.AddressToUUIDNotFoundException;
import org.xtreemfs.common.libxtreemfs.exceptions.PosixErrorException;
import org.xtreemfs.common.xloc.ReplicationFlags;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.Auth;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.UserCredentials;
import org.xtreemfs.mrc.metadata.ReplicationPolicy;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.REPL_FLAG;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replica;
import org.xtreemfs.pbrpc.generatedinterfaces.GlobalTypes.Replicas;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.DirectoryEntries;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.Stat;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.StatVFS;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.XATTR_FLAGS;
import org.xtreemfs.pbrpc.generatedinterfaces.MRC.listxattrResponse;
/**
 * Represents a volume. A volume object can be obtained by opening a volume with a client.
*/
public interface Volume {
public void internalShutdown();
/**
* Start this volume, e.g. initialize all required things.
*/
void start() throws Exception;
/**
* Same as start(), but add option to start threads as daemons. Daemon threads are only used by the XtreemFSHadoopClient.
*
* @param startThreadsAsDaemons if true, all threads are daemons.
*/
void start(boolean startThreadsAsDaemons) throws Exception;
/**
* Close the volume.
*/
public void close();
/**
* Returns information about the volume (e.g. used/free space).
*
* @param userCredentials
* Name and groups of the User.
* @return {@link StatVFS}
*
* @throws AddressToUUIDNotFoundException
* @throws IOException
* @throws PosixErrorException
*/
public StatVFS statFS(UserCredentials userCredentials) throws IOException, PosixErrorException,
AddressToUUIDNotFoundException;
/**
*
* Resolves the symbolic link at "path" and returns it in "linkTargetPath".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the symbolic link.
* @return String where to store the result.
*
* @throws AddressToUUIDNotFoundException
* @throws IOException
* @throws PosixErrorException
*/
public String readLink(UserCredentials userCredentials, String path) throws IOException,
PosixErrorException, AddressToUUIDNotFoundException;
/**
*
* Creates a symbolic link pointing to "targetPath" at "linkPath".
*
* @param userCredentials
* Name and Groups of the user.
* @param targetPath
* Path to the target.
* @param linkPath
* Path to the symbolic link.
*
* @throws AddressToUUIDNotFoundException
* @throws IOException
* @throws PosixErrorException
*/
public void symlink(UserCredentials userCredentials, String targetPath, String linkPath)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Creates a hard link pointing to "targetPath" at "linkPath".
*
* @param userCredentials
* Name and Groups of the user.
* @param targetPath
* Path to the target.
* @param linkPath
* Path to the hard link.
*
* @throws AddressToUUIDNotFoundException
* @throws IOException
* @throws PosixErrorException
*/
public void link(UserCredentials userCredentials, String targetPath, String linkPath)
throws IOException, PosixErrorException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Tests if the subject described by "userCredentials" is allowed to access "path" as specified by
* "flags". "flags" is a bit mask which may contain the values ACCESS_FLAGS_{F_OK,R_OK,W_OK,X_OK}.
*
* Throws a PosixErrorException if not allowed.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param flags
* Open flags as specified in xtreemfs::pbrpc::SYSTEM_V_FCNTL.
*
* @throws AddressToUUIDNotFoundException
* @throws IOException
* @throws PosixErrorException
*/
public void access(UserCredentials userCredentials, String path, int flags) throws IOException,
PosixErrorException, AddressToUUIDNotFoundException;
/**
* Opens a file and returns the pointer to a {@link FileHandle} object.
*
* When creating files, use the function {@link #openFile(UserCredentials, String, int, int)} with the
* additional mode parameter instead.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file.
* @param flags
* Open flags as specified in xtreemfs::pbrpc::SYSTEM_V_FCNTL.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*
* @remark Ownership is NOT transferred to the caller. Instead FileHandle.close() has to be called to
* destroy the object.
*/
public FileHandle openFile(UserCredentials userCredentials, String path, int flags)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Same as previous openFile() except for the additional mode parameter, which sets the permissions for
* the file in case SYSTEM_V_FCNTL_H_O_CREAT is specified as flag and the file will be created.
*
* Please note that the mode parameter requires octal values, i.e. use 0777 instead of 777 for the
* permissions.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public FileHandle openFile(UserCredentials userCredentials, String path, int flags, int mode)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Truncates the file to "newFileSize" bytes.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file.
* @param newFileSize
* New size of file.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void truncate(UserCredentials userCredentials, String path, int newFileSize)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Retrieve the attributes of a file and writes the result in "stat".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @return stat Result of the operation will be stored here.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public Stat getAttr(UserCredentials userCredentials, String path) throws IOException,
PosixErrorException, AddressToUUIDNotFoundException;
/**
* Sets the attributes given by "stat" and specified in "toSet".
*
* @note If the mode, uid or gid is changed, the ctime of the file will be updated according to POSIX
* semantics.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param stat
* Stat object with attributes which will be set.
* @param toSet
* Bitmask which defines which attributes to set.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void setAttr(UserCredentials userCredentials, String path, Stat stat, int toSet)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Remove the file at "path" (deletes the entry at the MRC and all objects on one OSD).
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void unlink(UserCredentials userCredentials, String path) throws IOException,
PosixErrorException, AddressToUUIDNotFoundException;
/**
* Rename a file or directory "path" to "newPath".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Old path.
* @param newPath
* New path.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
* */
public void rename(UserCredentials userCredentials, String path, String newPath)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Creates a directory with the modes "mode". Creates missing parent directories if and only if recursive
* is set to true. Results in an error otherwise.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the new directory.
* @param mode
* Permissions of the new directory.
* @param recursive
* Whether or not non existing parent directories should be created.
*
* @throws IOException
* @throws PosixErrorException
* @throws AddressToUUIDNotFoundException
*/
public void createDirectory(UserCredentials userCredentials, String path, int mode,
boolean recursive) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Creates a directory with the modes "mode". Results in an error when parent directory doesn't exist.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the new directory.
* @param mode
* Permissions of the new directory.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void createDirectory(UserCredentials userCredentials, String path, int mode)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Removes the directory at "path" which has to be empty.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the directory to be removed.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void removeDirectory(UserCredentials userCredentials, String path) throws IOException,
PosixErrorException, AddressToUUIDNotFoundException;
/**
* Returns a list of "count" directories/files contained in the directory "path" beginning by "offset". If
* count equals 0 all entries beginning by "offset" will be in the list.
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the directory.
* @param offset
* Index of first requested entry.
* @param count
* Number of requested entries.
* @param namesOnly
* If set to true, the {@link Stat} object of every entry will be omitted.
*
* @return {@link DirectoryEntries} will contain the names of the entries and, if not disabled by
* "namesOnly", a {@link Stat} object for every entry.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public DirectoryEntries readDir(UserCredentials userCredentials, String path, int offset,
int count, boolean namesOnly) throws IOException, PosixErrorException,
AddressToUUIDNotFoundException;
/**
* Returns the list of extended attributes stored for "path" (Entries may be cached).
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*
* @return {@link listxattrResponse}
*
* @remark Ownership is transferred to the caller.
*/
public listxattrResponse listXAttrs(UserCredentials userCredentials, String path)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Returns the list of extended attributes stored for "path" (Set "useCache" to false to make sure no
* cached entries are returned).
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param useCache
* Set to false to fetch the attributes from the MRC.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*
* @remark Ownership is transferred to the caller.
*/
public listxattrResponse listXAttrs(UserCredentials userCredentials, String path,
boolean useCache) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Sets the extended attribute "name" of "path" to "value".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param name
* Name of the extended attribute.
* @param value
* Value of the extended attribute.
* @param flags
* May be XATTR_CREATE or XATTR_REPLACE.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void setXAttr(UserCredentials userCredentials, String path, String name, String value,
XATTR_FLAGS flags) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Sets the extended attribute "name" of "path" to "value".
*
* @param userCredentials
* Name and Groups of the user.
* @param auth
* Authentication data, e.g. of type AUTH_PASSWORD.
* @param path
* Path to the file/directory.
* @param name
* Name of the extended attribute.
* @param value
* Value of the extended attribute.
* @param flags
* May be XATTR_CREATE or XATTR_REPLACE.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*/
public void setXAttr(UserCredentials userCredentials, Auth auth, String path, String name, String value,
XATTR_FLAGS flags) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Returns value for an XAttribute with "name" stored for "path" in "value".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param name
* Name of the extended attribute.
* @return String Will contain the content of the extended attribute. NULL if attribute was not found.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*
*/
public String getXAttr(UserCredentials userCredentials, String path, String name)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
* Writes the size of a value (string size without null-termination) of an XAttribute "name" stored for
* "path" in "size".
*
* @param userCredentials
* Name and Groups of the user.
* @param path
* Path to the file/directory.
* @param name
* Name of the extended attribute.
*
* @throws AddressToUUIDNotFoundException
* @throws {@link IOException}
* @throws PosixErrorException
*
* @return true if the attribute was found.
*/
public int getXAttrSize(UserCredentials userCredentials, String path, String name)
throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Removes the extended attribute "name", stored for "path".
 *
 * @param userCredentials
 *            Name and Groups of the user.
 * @param path
 *            Path to the file/directory.
 * @param name
 *            Name of the extended attribute.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public void removeXAttr(UserCredentials userCredentials, String path, String name)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Adds a new replica for the file at "path" and triggers the replication of this replica if it's a full
 * replica.
 *
 * Please note, in case of a read-only replica the replication flags of newReplica must contain a
 * replication strategy flag.
 * For a partial replica, use {@link REPL_FLAG#REPL_FLAG_STRATEGY_SEQUENTIAL_PREFETCHING}.
 * If you create a full replica (with the flag {@link REPL_FLAG#REPL_FLAG_FULL_REPLICA}),
 * use the strategy {@link REPL_FLAG#REPL_FLAG_STRATEGY_RAREST_FIRST}.
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param path
 *            Path to the file.
 * @param newReplica
 *            Description of the new replica to be added.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public void addReplica(UserCredentials userCredentials, String path, Replica newReplica)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns the list of replicas of the file at "path".
 *
 * <p>Note: ownership of the returned object is transferred to the caller.
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param path
 *            Path to the file.
 * @return The replicas of the file.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public Replicas listReplicas(UserCredentials userCredentials, String path) throws IOException,
        PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Removes the replica of file at "path" located on the OSD with the UUID "osdUuid" (which has to be the
 * head OSD in case of striping).
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param path
 *            Path to the file.
 * @param osdUuid
 *            UUID of the OSD from which the replica will be deleted.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public void removeReplica(UserCredentials userCredentials, String path, String osdUuid)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns a list of all available OSDs where the file (described by "path") can be placed.
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param path
 *            Path to the file.
 * @param numberOfOsds
 *            Number of OSDs required in a valid group. This is only relevant for grouping and will be
 *            ignored by filtering and sorting policies.
 * @return List of UUIDs of the suitable OSDs.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public List<String> getSuitableOSDs(UserCredentials userCredentials, String path,
        int numberOfOsds) throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Sets the default replication policy for "directory".
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param directory
 *            Path of the directory.
 * @param replicationPolicy
 *            Replication policy which is defined in {@link ReplicaUpdatePolicies}.
 * @param replicationFactor
 *            Number of replicas that should be assigned to new files.
 * @param replicationFlags
 *            Replication flags as number. Defined in {@link REPL_FLAG}.
 *            Use the helper functions available in {@link ReplicationFlags}.
 *
 * @throws AddressToUUIDNotFoundException
 * @throws IOException
 * @throws PosixErrorException
 */
public void setDefaultReplicationPolicy(UserCredentials userCredentials, String directory,
        String replicationPolicy, int replicationFactor, int replicationFlags) throws IOException,
        PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Gets the default replication policy for "directory".
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param directory
 *            Path of the directory.
 * @return The {@link ReplicationPolicy} currently set for the directory.
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public ReplicationPolicy getDefaultReplicationPolicy(UserCredentials userCredentials, String directory)
        throws IOException, PosixErrorException, AddressToUUIDNotFoundException;
/**
 * Returns a list of {@link StripeLocation} where each stripe of the file is located. To determine where a
 * particular stripe is located, the UUIDs of all replicas which have a copy of this stripe will be
 * collected and resolved to hostnames. If a UUID can't be resolved it will be deleted from the list
 * because HDFS can't handle IP addresses.
 *
 * @param userCredentials
 *            Username and groups of the user.
 * @param path
 *            Path of the file.
 * @param startSize
 *            Size in byte where to start collecting the {@link StripeLocation}s.
 * @param length
 *            The length of the part of the file where the {@link StripeLocation}s should be collected in
 *            byte.
 * @return {@link List} of {@link StripeLocation}
 *
 * @throws IOException
 * @throws PosixErrorException
 * @throws AddressToUUIDNotFoundException
 */
public List<StripeLocation> getStripeLocations(UserCredentials userCredentials, String path,
        long startSize, long length) throws IOException, PosixErrorException,
        AddressToUUIDNotFoundException;
/**
 * Removes the user from the ACL stored for "path".
 *
 * @param userCreds
 *            Username and groups of the user.
 * @param path
 *            The path on the volume where the ACL is stored.
 * @param user
 *            The user whose access rights are removed.
 * @throws IOException
 */
public void removeACL(UserCredentials userCreds, String path, String user) throws IOException;
/**
 * Removes all provided users from the ACL stored for "path".
 *
 * @param userCreds
 *            Username and groups of the user.
 * @param path
 *            The path on the volume where the ACL is stored.
 * @param aclEntries
 *            Set of ACL entries to remove.
 * @throws IOException
 */
public void removeACL(UserCredentials userCreds, String path, Set<String> aclEntries) throws IOException;
/**
 * Adds the user to the ACL for the provided path.
 *
 * @param userCreds
 *            Username and groups of the user.
 * @param path
 *            The path on the volume where the ACL is stored.
 * @param user
 *            The user to grant access rights to.
 * @param accessrights
 *            The access rights to be set for the user. I.e. rwx, rx, rw, ...
 * @throws IOException
 */
public void setACL(UserCredentials userCreds, String path, String user, String accessrights)
        throws IOException;
/**
 * Adds all provided entries to the ACL for the provided path.
 *
 * @param userCreds
 *            Username and groups of the user.
 * @param path
 *            The path on the volume where the ACL is stored.
 * @param aclEntries
 *            ACL entries to set.
 * @throws IOException
 */
public void setACL(UserCredentials userCreds, String path, Map<String, Object> aclEntries)
        throws IOException;
/**
 * Returns all entries of the ACL for the provided path.
 *
 * @param userCreds
 *            Username and groups of the user.
 * @param path
 *            The path on the volume where the ACL is stored.
 * @return Map of ACL entries stored for the path.
 * @throws IOException
 */
public Map<String, Object> listACL(UserCredentials userCreds, String path) throws IOException;
/** Get the OSD selection policies of the volume (replica placement).
 *
 * @param userCreds Username and groups of the user.
 *
 * @return List of policies as comma separated string.
 * @throws IOException
 */
public String getOSDSelectionPolicy(UserCredentials userCreds) throws IOException;
/** Set the OSD selection policies for the volume (replica placement).
 *
 * @param userCreds Username and groups of the user.
 * @param policies List of policies as comma separated string.
 * @throws IOException
 */
public void setOSDSelectionPolicy(UserCredentials userCreds, String policies) throws IOException;
/** Get the Replica selection policies of the volume (replica selection).
 *
 * @param userCreds Username and groups of the user.
 *
 * @return List of policies as comma separated string.
 * @throws IOException
 */
public String getReplicaSelectionPolicy(UserCredentials userCreds) throws IOException;
/** Set the Replica selection policies for the volume (replica selection).
 *
 * @param userCreds Username and groups of the user.
 * @param policies List of policies as comma separated string.
 * @throws IOException
 */
public void setReplicaSelectionPolicy(UserCredentials userCreds, String policies) throws IOException;
/** Set attribute of a policy to further customize replica placement and
 * selection. See the user guide for more information.
 *
 * @param userCreds Username and groups of the user.
 * @param attribute Format: {@code <policy id>.<attribute name>} e.g., "1001.domains"
 * @param value Value of the attribute.
 * @throws IOException
 */
public void setPolicyAttribute(UserCredentials userCreds, String attribute, String value) throws IOException;
/**
 * Get the name of the volume.
 *
 * @return The name of the volume.
 */
public String getVolumeName();
/**
 * Used only for Hadoop Interface.
 *
 * Encapsulates information about one stripe, i.e. the size in kb where the stripe begins, the length of
 * the stripe and lists of hostnames and corresponding uuids where the stripe is located. Hostnames are
 * usually the ones which are configured through the "hostname = " option of the OSD. Otherwise it is the
 * resolved hostname of the registered IP address at the DIR.
 */
public class StripeLocation {
    /** Offset (see class comment) at which this stripe begins. */
    private final long     startSize;
    /** Length of this stripe. */
    private final long     length;
    /** UUIDs of the OSDs holding a copy of this stripe. */
    private final String[] uuids;
    /**
     * The hostname as configured with "hostname = " parameter of the OSD or otherwise the resolved
     * hostname from the IP address registered at DIR.
     */
    private final String[] hostnames;

    /**
     * Arrays are defensively copied so later mutation by the caller cannot corrupt this instance.
     */
    protected StripeLocation(long startSize, long length, String[] uuids, String[] hostnames) {
        this.startSize = startSize;
        this.length = length;
        this.uuids = (uuids == null) ? null : uuids.clone();
        this.hostnames = (hostnames == null) ? null : hostnames.clone();
    }

    public long getStartSize() {
        return startSize;
    }

    public long getLength() {
        return length;
    }

    /** @return A copy of the UUID array; mutating it does not affect this instance. */
    public String[] getUuids() {
        return (uuids == null) ? null : uuids.clone();
    }

    /** @return A copy of the hostname array; mutating it does not affect this instance. */
    public String[] getHostnames() {
        return (hostnames == null) ? null : hostnames.clone();
    }
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs;
import org.xtreemfs.foundation.buffer.ReusableBuffer;
/**
 * Encapsulates a single write request to one OSD: the object number, the offset within that object,
 * and the slice of the user's buffer to be written.
 * <br>
 * Nov 2, 2011
 */
public class WriteOperation {

    /** Number of the object this write belongs to. */
    private final long           objNumber;
    /** Offset within the object on the OSD. */
    private final int            osdOffset;
    /** Number of bytes of this write request. */
    private final int            reqSize;
    /** Offset within the request's data where this operation's slice starts. */
    private final int            reqOffset;
    /** Buffer holding the data to be written. */
    private final ReusableBuffer buf;

    protected WriteOperation(long objNumber, int osdOffset, int reqSize, int reqOffset, ReusableBuffer buf) {
        this.objNumber = objNumber;
        this.osdOffset = osdOffset;
        this.reqSize = reqSize;
        this.reqOffset = reqOffset;
        this.buf = buf;
    }

    protected long getObjNumber() {
        return objNumber;
    }

    public int getOsdOffset() {
        return osdOffset;
    }

    protected int getReqSize() {
        return reqSize;
    }

    protected int getReqOffset() {
        return reqOffset;
    }

    protected ReusableBuffer getReqData() {
        return buf;
    }
}

View File

@@ -0,0 +1,14 @@
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * Thrown when no service address could be resolved for a given UUID.
 */
public class AddressToUUIDNotFoundException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /**
     * @param uuid
     *            The UUID for which no address mapping was found.
     */
    public AddressToUUIDNotFoundException(String uuid) {
        super("UUID: service not found for uuid " + uuid);
    }
}

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * Thrown when a server reported an internal error.
 * <br>Nov 22, 2011
 */
public class InternalServerErrorException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /**
     * @param errorMsg
     *            Error message reported by the server.
     */
    public InternalServerErrorException(String errorMsg) {
        super(errorMsg);
    }
}

View File

@@ -0,0 +1,17 @@
/*
* Copyright (c) 2012 by Lukas Kairies, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * Thrown when a checksum verification failed.
 */
public class InvalidChecksumException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /**
     * @param errorMsg
     *            Description of the checksum mismatch.
     */
    public InvalidChecksumException(String errorMsg) {
        super(errorMsg);
    }
}

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2013 by Johannes Dillmann, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
import org.xtreemfs.common.libxtreemfs.UUIDIterator;
/**
 * This exception is thrown, when a request was denied due to an outdated view (XLocSet). <br>
 * The client should reload the view (XLocSet), refresh {@link UUIDIterator}s based on it and retry the request.
 */
public class InvalidViewException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /**
     * @param errorMsg
     *            Description of the view mismatch.
     */
    public InvalidViewException(String errorMsg) {
        super(errorMsg);
    }
}

View File

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
import java.io.IOException;
import org.xtreemfs.foundation.pbrpc.generatedinterfaces.RPC.POSIXErrno;
/**
 * Thrown when a server responded with a POSIX error.
 */
public class PosixErrorException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /** POSIX error code reported by the server; null if constructed from a message only. */
    private final POSIXErrno posixError;

    /**
     * Creates an exception without an associated POSIX error code;
     * {@link #getPosixError()} will return null in this case.
     *
     * @param message
     *            Description of the error.
     */
    public PosixErrorException(String message) {
        this(null, message);
    }

    /**
     * @param posixError
     *            POSIX error code reported by the server.
     * @param message
     *            Description of the error.
     */
    public PosixErrorException(POSIXErrno posixError, String message) {
        super(message);
        this.posixError = posixError;
    }

    /**
     * @return The POSIX error code, or null if none was supplied at construction.
     */
    public POSIXErrno getPosixError() {
        return this.posixError;
    }
}

View File

@@ -0,0 +1,19 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * {@link Exception} that is thrown when the UUIDIterator list reached its end.
 * (The misspelling "Empy" in the class name is kept for backward compatibility with existing callers.)
 * <br>Sep 3, 2011
 */
@SuppressWarnings("serial")
public class UUIDIteratorListIsEmpyException extends XtreemFSException {

    /**
     * @param message
     *            Description of the exhausted iterator.
     */
    public UUIDIteratorListIsEmpyException(String message) {
        super(message);
    }
}

View File

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * Thrown if a given UUID was not found in the xlocset of a file.
 */
public class UUIDNotInXlocSetException extends XtreemFSException {

    private static final long serialVersionUID = 1L;

    /**
     * @param errorMsg
     *            Description of the missing UUID.
     */
    public UUIDNotInXlocSetException(String errorMsg) {
        super(errorMsg);
    }
}

View File

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2008-2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
/**
 * Thrown when a requested volume is not known.
 * <br>
 * Sep 2, 2011
 */
@SuppressWarnings("serial")
public class VolumeNotFoundException extends XtreemFSException {

    /**
     * @param volumeName
     *            Name of the volume that could not be found.
     */
    public VolumeNotFoundException(String volumeName) {
        super("Volume not found: " + volumeName);
    }
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2011 by Paul Seiferth, Zuse Institute Berlin
*
* Licensed under the BSD License, see LICENSE file for details.
*
*/
package org.xtreemfs.common.libxtreemfs.exceptions;
import java.io.IOException;
/**
 * Base class of all libxtreemfs exceptions; an {@link IOException} carrying an error message.
 * <br>
 * Nov 22, 2011
 */
public class XtreemFSException extends IOException {

    private static final long serialVersionUID = 1L;

    /**
     * @param errorMsg
     *            Description of the error.
     */
    public XtreemFSException(String errorMsg) {
        super(errorMsg);
    }
}

Some files were not shown because too many files have changed in this diff Show More