diff --git a/autogen.sh b/autogen.sh index f90c49de20a3c..83c0104450d20 100755 --- a/autogen.sh +++ b/autogen.sh @@ -1,5 +1,7 @@ #!/bin/sh +set -e + check_for_pkg_config() { which pkg-config >/dev/null && return @@ -11,10 +13,20 @@ check_for_pkg_config() { exit 1 } +if [ `which libtoolize` ]; then + LIBTOOLIZE=libtoolize +elif [ `which glibtoolize` ]; then + LIBTOOLIZE=glibtoolize +else + echo "Error: could not find libtoolize" + echo " Please install libtoolize or glibtoolize." + exit 1 +fi + rm -f config.cache aclocal -I m4 --install check_for_pkg_config -libtoolize --force --copy +$LIBTOOLIZE --force --copy autoconf autoheader automake -a --add-missing -Wall diff --git a/configure.ac b/configure.ac index c344e7d6134b6..01eef82ee875c 100644 --- a/configure.ac +++ b/configure.ac @@ -8,7 +8,7 @@ AC_PREREQ(2.59) # VERSION define is not used by the code. It gets a version string # from 'git describe'; see src/ceph_ver.[ch] -AC_INIT([ceph], [0.72-rc1], [ceph-devel@vger.kernel.org]) +AC_INIT([ceph], [0.72], [ceph-devel@vger.kernel.org]) # Create release string. Used with VERSION for RPMs. RPM_RELEASE=0 @@ -113,6 +113,10 @@ AM_CONDITIONAL([ENABLE_FPU_NEON], [case $target_cpu in arm*) true;; *) false;; e AX_CHECK_COMPILE_FLAG([-fvar-tracking-assignments], [HAS_VTA_SUPPORT=1], [HAS_VTA_SUPPORT=0]) AM_CONDITIONAL(COMPILER_HAS_VTA, [test "$HAS_VTA_SUPPORT" = 1]) +AX_CXX_STATIC_CAST +AX_C_VAR_FUNC +AX_C_PRETTY_FUNC + # Checks for libraries. ACX_PTHREAD AC_CHECK_LIB([uuid], [uuid_parse], [true], AC_MSG_FAILURE([libuuid not found])) @@ -535,9 +539,23 @@ AC_CHECK_FUNC([fallocate], [AC_DEFINE([CEPH_HAVE_FALLOCATE], [], [fallocate(2) is supported])], []) +# +# Test for time-related `struct stat` members. 
+# + +AC_CHECK_MEMBER([struct stat.st_mtim.tv_nsec], + [AC_DEFINE(HAVE_STAT_ST_MTIM_TV_NSEC, 1, + [Define if you have struct stat.st_mtim.tv_nsec])]) + +AC_CHECK_MEMBER([struct stat.st_mtimespec.tv_nsec], + [AC_DEFINE(HAVE_STAT_ST_MTIMESPEC_TV_NSEC, 1, + [Define if you have struct stat.st_mtimespec.tv_nsec])]) +AC_CHECK_HEADERS([arpa/nameser_compat.h]) AC_CHECK_HEADERS([sys/prctl.h]) AC_CHECK_FUNCS([prctl]) +AC_CHECK_FUNCS([pipe2]) +AC_CHECK_FUNCS([posix_fadvise]) # Checks for typedefs, structures, and compiler characteristics. #AC_HEADER_STDBOOL diff --git a/debian/changelog b/debian/changelog index f6575c29a726c..dd2dbb95ce4f6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +ceph (0.72-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Thu, 07 Nov 2013 20:25:18 +0000 + ceph (0.72-rc1-1) stable; urgency=low * New upstream release diff --git a/doc/install/cpu-profiler.rst b/doc/dev/cpu-profiler.rst similarity index 100% rename from doc/install/cpu-profiler.rst rename to doc/dev/cpu-profiler.rst diff --git a/doc/images/region-zone-sync.png b/doc/images/region-zone-sync.png new file mode 100644 index 0000000000000..adafd40cfbc2f Binary files /dev/null and b/doc/images/region-zone-sync.png differ diff --git a/doc/images/zone-sync.png b/doc/images/zone-sync.png new file mode 100644 index 0000000000000..6289feab4503b Binary files /dev/null and b/doc/images/zone-sync.png differ diff --git a/doc/install/build-ceph.rst b/doc/install/build-ceph.rst new file mode 100644 index 0000000000000..d77d7f885098a --- /dev/null +++ b/doc/install/build-ceph.rst @@ -0,0 +1,176 @@ +============ + Build Ceph +============ + +You can get Ceph software by retrieving Ceph source code and building it yourself. +To build Ceph, you need to set up a development environment, compile Ceph, +and then either install in user space or build packages and install the packages. + +Build Prerequisites +=================== + + +.. 
tip:: Check this section to see if there are specific prerequisites for your + Linux/Unix distribution. + +Before you can build Ceph source code, you need to install several libraries +and tools. Ceph provides ``autoconf`` and ``automake`` scripts to get you +started quickly. Ceph build scripts depend on the following: + +- ``autotools-dev`` +- ``autoconf`` +- ``automake`` +- ``cdbs`` +- ``gcc`` +- ``g++`` +- ``git`` +- ``libboost-dev`` +- ``libedit-dev`` +- ``libssl-dev`` +- ``libtool`` +- ``libfcgi`` +- ``libfcgi-dev`` +- ``libfuse-dev`` +- ``linux-kernel-headers`` +- ``libcrypto++-dev`` +- ``libcrypto++`` +- ``libexpat1-dev`` +- ``pkg-config`` +- ``libcurl4-gnutls-dev`` + +On Ubuntu, execute ``sudo apt-get install`` for each dependency that isn't +installed on your host. :: + + sudo apt-get install autotools-dev autoconf automake cdbs gcc g++ git libboost-dev libedit-dev libssl-dev libtool libfcgi libfcgi-dev libfuse-dev linux-kernel-headers libcrypto++-dev libcrypto++ libexpat1-dev + +On Debian/Squeeze, execute ``aptitude install`` for each dependency that isn't +installed on your host. :: + + aptitude install autotools-dev autoconf automake cdbs gcc g++ git libboost-dev libedit-dev libssl-dev libtool libfcgi libfcgi-dev libfuse-dev linux-kernel-headers libcrypto++-dev libcrypto++ libexpat1-dev pkg-config libcurl4-gnutls-dev + +On Debian/Wheezy, you may also need:: + + keyutils-dev libaio libboost-thread-dev + +.. note:: Some distributions that support Google's memory profiler tool may use + a different package name (e.g., ``libgoogle-perftools4``). + +Ubuntu +------ + +- ``uuid-dev`` +- ``libkeyutils-dev`` +- ``libgoogle-perftools-dev`` +- ``libatomic-ops-dev`` +- ``libaio-dev`` +- ``libgdata-common`` +- ``libgdata13`` +- ``libsnappy-dev`` +- ``libleveldb-dev`` + +Execute ``sudo apt-get install`` for each dependency that isn't installed on +your host. 
:: + + sudo apt-get install uuid-dev libkeyutils-dev libgoogle-perftools-dev libatomic-ops-dev libaio-dev libgdata-common libgdata13 libsnappy-dev libleveldb-dev + + +Debian +------ + +Alternatively, you may also install:: + + aptitude install fakeroot dpkg-dev + aptitude install debhelper cdbs libexpat1-dev libatomic-ops-dev + +openSUSE 11.2 (and later) +------------------------- + +- ``boost-devel`` +- ``gcc-c++`` +- ``libedit-devel`` +- ``libopenssl-devel`` +- ``fuse-devel`` (optional) + +Execute ``zypper install`` for each dependency that isn't installed on your +host. :: + + zypper install boost-devel gcc-c++ libedit-devel libopenssl-devel fuse-devel + + + +Build Ceph +========== + +Ceph provides ``automake`` and ``configure`` scripts to streamline the build +process. To build Ceph, navigate to your cloned Ceph repository and execute the +following:: + + cd ceph + ./autogen.sh + ./configure + make + +.. topic:: Hyperthreading + + You can use ``make -j`` to execute multiple jobs depending upon your system. For + example, ``make -j4`` for a dual core processor may build faster. + +See `Installing a Build`_ to install a build in user space. + +Build Ceph Packages +=================== + +To build packages, you must clone the `Ceph`_ repository. You can create +installation packages from the latest code using ``dpkg-buildpackage`` for +Debian/Ubuntu or ``rpmbuild`` for the RPM Package Manager. + +.. tip:: When building on a multi-core CPU, use the ``-j`` and the number of + cores * 2. For example, use ``-j4`` for a dual-core processor to accelerate + the build. 
+ + +Advanced Package Tool (APT) +--------------------------- + +To create ``.deb`` packages for Debian/Ubuntu, ensure that you have cloned the +`Ceph`_ repository, installed the `Build Prerequisites`_ and installed +``debhelper``:: + + sudo apt-get install debhelper + +Once you have installed debhelper, you can build the packages:: + + sudo dpkg-buildpackage + +For multi-processor CPUs use the ``-j`` option to accelerate the build. + + +RPM Package Manager +------------------- + +To create ``.rpm`` packages, ensure that you have cloned the `Ceph`_ repository, +installed the `Build Prerequisites`_ and installed ``rpm-build`` and +``rpmdevtools``:: + + yum install rpm-build rpmdevtools + +Once you have installed the tools, setup an RPM compilation environment:: + + rpmdev-setuptree + +Fetch the source tarball for the RPM compilation environment:: + + wget -P ~/rpmbuild/SOURCES/ http://ceph.com/download/ceph-.tar.gz + +Or from the EU mirror:: + + wget -P ~/rpmbuild/SOURCES/ http://eu.ceph.com/download/ceph-.tar.gz + +Build the RPM packages:: + + rpmbuild -tb ~/rpmbuild/SOURCES/ceph-.tar.gz + +For multi-processor CPUs use the ``-j`` option to accelerate the build. + +.. _Ceph: ../clone-source +.. _Installing a Build: ../install-storage-cluster#installing-a-build diff --git a/doc/install/build-packages.rst b/doc/install/build-packages.rst deleted file mode 100644 index 2e5662099d26c..0000000000000 --- a/doc/install/build-packages.rst +++ /dev/null @@ -1,56 +0,0 @@ -===================== - Build Ceph Packages -===================== - -To build packages, you must clone the `Ceph`_ repository. You can create -installation packages from the latest code using ``dpkg-buildpackage`` for -Debian/Ubuntu or ``rpmbuild`` for the RPM Package Manager. - -.. tip:: When building on a multi-core CPU, use the ``-j`` and the number of - cores * 2. For example, use ``-j4`` for a dual-core processor to accelerate - the build. 
- -Advanced Package Tool (APT) -=========================== - -To create ``.deb`` packages for Debian/Ubuntu, ensure that you have cloned the -`Ceph`_ repository, installed the `build prerequisites`_ and installed -``debhelper``:: - - sudo apt-get install debhelper - -Once you have installed debhelper, you can build the packages:: - - sudo dpkg-buildpackage - -For multi-processor CPUs use the ``-j`` option to accelerate the build. - -RPM Package Manager -=================== - -To create ``.rpm`` packages, ensure that you have cloned the `Ceph`_ repository, -installed the `build prerequisites`_ and installed ``rpm-build`` and -``rpmdevtools``:: - - yum install rpm-build rpmdevtools - -Once you have installed the tools, setup an RPM compilation environment:: - - rpmdev-setuptree - -Fetch the source tarball for the RPM compilation environment:: - - wget -P ~/rpmbuild/SOURCES/ http://ceph.com/download/ceph-.tar.gz - -Or from the EU mirror:: - - wget -P ~/rpmbuild/SOURCES/ http://eu.ceph.com/download/ceph-.tar.gz - -Build the RPM packages:: - - rpmbuild -tb ~/rpmbuild/SOURCES/ceph-.tar.gz - -For multi-processor CPUs use the ``-j`` option to accelerate the build. - -.. _build prerequisites: ../build-prerequisites -.. _Ceph: ../clone-source diff --git a/doc/install/build-prerequisites.rst b/doc/install/build-prerequisites.rst deleted file mode 100644 index bdb409729ba06..0000000000000 --- a/doc/install/build-prerequisites.rst +++ /dev/null @@ -1,90 +0,0 @@ -===================== - Build Prerequisites -===================== - -.. tip:: Check this section to see if there are specific prerequisites for your - Linux/Unix distribution. - -Before you can build Ceph source code, you need to install several libraries -and tools. Ceph provides ``autoconf`` and ``automake`` scripts to get you -started quickly. 
Ceph build scripts depend on the following: - -- ``autotools-dev`` -- ``autoconf`` -- ``automake`` -- ``cdbs`` -- ``gcc`` -- ``g++`` -- ``git`` -- ``libboost-dev`` -- ``libedit-dev`` -- ``libssl-dev`` -- ``libtool`` -- ``libfcgi`` -- ``libfcgi-dev`` -- ``libfuse-dev`` -- ``linux-kernel-headers`` -- ``libcrypto++-dev`` -- ``libcrypto++`` -- ``libexpat1-dev`` -- ``pkg-config`` -- ``libcurl4-gnutls-dev`` - -On Ubuntu, execute ``sudo apt-get install`` for each dependency that isn't -installed on your host. :: - - sudo apt-get install autotools-dev autoconf automake cdbs gcc g++ git libboost-dev libedit-dev libssl-dev libtool libfcgi libfcgi-dev libfuse-dev linux-kernel-headers libcrypto++-dev libcrypto++ libexpat1-dev - -On Debian/Squeeze, execute ``aptitude install`` for each dependency that isn't -installed on your host. :: - - aptitude install autotools-dev autoconf automake cdbs gcc g++ git libboost-dev libedit-dev libssl-dev libtool libfcgi libfcgi-dev libfuse-dev linux-kernel-headers libcrypto++-dev libcrypto++ libexpat1-dev pkg-config libcurl4-gnutls-dev - -On Debian/Wheezy, you may also need:: - - keyutils-dev libaio libboost-thread-dev - -.. note:: Some distributions that support Google's memory profiler tool may use - a different package name (e.g., ``libgoogle-perftools4``). - -Ubuntu -====== - -- ``uuid-dev`` -- ``libkeyutils-dev`` -- ``libgoogle-perftools-dev`` -- ``libatomic-ops-dev`` -- ``libaio-dev`` -- ``libgdata-common`` -- ``libgdata13`` -- ``libsnappy-dev`` -- ``libleveldb-dev`` - -Execute ``sudo apt-get install`` for each dependency that isn't installed on -your host. 
:: - - sudo apt-get install uuid-dev libkeyutils-dev libgoogle-perftools-dev libatomic-ops-dev libaio-dev libgdata-common libgdata13 libsnappy-dev libleveldb-dev - - -Debian -====== - -Alternatively, you may also install:: - - aptitude install fakeroot dpkg-dev - aptitude install debhelper cdbs libexpat1-dev libatomic-ops-dev - -openSUSE 11.2 (and later) -========================= - -- ``boost-devel`` -- ``gcc-c++`` -- ``libedit-devel`` -- ``libopenssl-devel`` -- ``fuse-devel`` (optional) - -Execute ``zypper install`` for each dependency that isn't installed on your -host. :: - - zypper install boost-devel gcc-c++ libedit-devel libopenssl-devel fuse-devel - diff --git a/doc/install/building-ceph.rst b/doc/install/building-ceph.rst deleted file mode 100644 index e8b4b3551b78a..0000000000000 --- a/doc/install/building-ceph.rst +++ /dev/null @@ -1,29 +0,0 @@ -=============== - Building Ceph -=============== - -Ceph provides ``automake`` and ``configure`` scripts to streamline the build -process. To build Ceph, navigate to your cloned Ceph repository and execute the -following:: - - cd ceph - ./autogen.sh - ./configure - make - -.. topic:: Hyperthreading - - You can use ``make -j`` to execute multiple jobs depending upon your system. For - example, ``make -j4`` for a dual core processor may build faster. - - -To install Ceph locally, you may also use:: - - sudo make install - -If you install Ceph locally, ``make`` will place the executables in -``usr/local/bin``. You may add the Ceph configuration file to the -``usr/local/bin`` directory to run an evaluation environment of Ceph from a -single directory. - -.. 
_Memory Profiling: ../../rados/operations/memory-profiling \ No newline at end of file diff --git a/doc/install/calxeda.rst b/doc/install/calxeda.rst deleted file mode 100644 index e7cb1697a4ea7..0000000000000 --- a/doc/install/calxeda.rst +++ /dev/null @@ -1,74 +0,0 @@ -======================= - Installing on Calxeda -======================= - -The Calxeda partnership with Inktank brings the Ceph Distributed Storage System -to Calxeda hardware. This document describes how to install Ceph development -packages on Calxeda hardware. - -Ceph on Calxeda uses Debian/Ubuntu Linux. At this time, gitbuilder builds -development packages for Calxeda on the Quantal Quetzal (i.e., 12.10) version of -Ubuntu. The installation process for Ceph on Calxeda is almost identical to the -process for installing Ceph packages on `Debian/Ubuntu`_. - - -Install Key -=========== - -Packages are cryptographically signed with the ``autobuild.asc`` key. Add the -Ceph autobuild key to your system's list of trusted keys to avoid a security -warning:: - - wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc' | sudo apt-key add - - - -Add Package -=========== - -Add the Ceph package repository to your system's list of APT sources, but -replace ``{BRANCH}`` with the branch you'd like to use (e.g., ``master``, -``stable``, ``next``, ``wip-hack``). :: - - echo deb http://gitbuilder.ceph.com/ceph-deb-$(lsb_release -sc)-armv7l-basic/ref/{BRANCH} $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - sudo apt-get update - - -Prerequisites -============= - -Ceph on Calxeda requires Google's memory profiling tools (``google-perftools``). -The Ceph repository should have a copy at -http://ceph.com/packages/google-perftools/debian. 
:: - - echo deb http://ceph.com/packages/google-perftools/debian $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/google-perftools.list - sudo apt-get update - sudo apt-get install google-perftools - - -Install Ceph -============ - -Once you have added development packages to APT and installed Google -memory profiling tools, you should update APT's database and install Ceph:: - - sudo apt-get update - sudo apt-get install ceph - -You may also use ceph-deploy to install Ceph, but you must add the key and -packages and install ``google-perftools`` first. - - -Ceph Object Storage Packages -============================ - -If you intend to run Ceph Object Storage on Calxeda hardware, you should add the -``apache2`` and ``fastcgi`` packages **before** installing Ceph Object Storage -components. :: - - echo deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-quantal-arm7l-basic | sudo tee /etc/apt/sources.list.d/fastcgi.list - echo deb http://gitbuilder.ceph.com/apache2-deb-$(lsb_release -sc)-arm7l-basic/ref/ master $(lsb_release -sc) | sudo tee /etc/apt/sources.list.d/apache2.list - -Once you have added these packages, you may install Ceph Object Storage on Calxeda -hardware. - -.. _Debian/Ubuntu: ../debian \ No newline at end of file diff --git a/doc/install/clone-source.rst b/doc/install/clone-source.rst index ad7dce4e5b4f9..56ce52f04e543 100644 --- a/doc/install/clone-source.rst +++ b/doc/install/clone-source.rst @@ -2,9 +2,58 @@ Cloning the Ceph Source Code Repository ========================================= -To clone the source, you must install Git. See `Set Up Git`_ for details. +You may clone a Ceph branch of the Ceph source code by going to `github Ceph +Repository`_, selecting a branch (``master`` by default), and clicking the +**Download ZIP** button. -.. _Set Up Git: ../git +.. _github Ceph Repository: https://github.com/ceph/ceph + + +To clone the entire git repository, install and configure ``git``. 
+ + +Install Git +=========== + +To install ``git``, execute:: + + sudo apt-get install git + +You must also have a ``github`` account. If you do not have a +``github`` account, go to `github.com`_ and register. +Follow the directions for setting up git at +`Set Up Git`_. + +.. _github.com: http://github.com +.. _Set Up Git: http://help.github.com/linux-set-up-git + + +Add SSH Keys (Optional) +======================= + +If you intend to commit code to Ceph or to clone using SSH +(``git@github.com:ceph/ceph.git``), you must generate SSH keys for github. + +.. tip:: If you only intend to clone the repository, you may + use ``git clone --recursive https://github.com/ceph/ceph.git`` + without generating SSH keys. + +To generate SSH keys for ``github``, execute:: + + ssh-keygen + +Get the key to add to your ``github`` account (the following example +assumes you used the default file path):: + + cat .ssh/id_rsa.pub + +Copy the public key. + +Go to your ``github`` account, click on "Account Settings" (i.e., the +'tools' icon); then, click "SSH Keys" on the left side navbar. + +Click "Add SSH key" in the "SSH Keys" list, enter a name for the key, paste the +key you generated, and press the "Add key" button. Clone the Source @@ -43,5 +92,4 @@ development branch. You may choose other branches too. :: - git checkout master diff --git a/doc/install/contributing.rst b/doc/install/contributing.rst deleted file mode 100644 index 87b5220b7744e..0000000000000 --- a/doc/install/contributing.rst +++ /dev/null @@ -1,8 +0,0 @@ -========================== - Contributing Source Code -========================== - -If you are making source contributions to the Ceph project, -you must be added to the `Ceph project`_ on github. - -.. 
_Ceph project: https://github.com/ceph \ No newline at end of file diff --git a/doc/install/debian.rst b/doc/install/debian.rst deleted file mode 100644 index c0c4dcb4205ef..0000000000000 --- a/doc/install/debian.rst +++ /dev/null @@ -1,150 +0,0 @@ -=================================== - Installing Debian/Ubuntu Packages -=================================== - -You may install stable release packages (for stable deployments), -development release packages (for the latest features), or development -testing packages (for development and QA only). Do not add multiple -package sources at the same time. - -Install Release Key -=================== - -Packages are cryptographically signed with the ``release.asc`` key. -Add our release key to your system's list of trusted keys to avoid a -security warning:: - - wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add - - -Add Release Packages -==================== - - -Dumpling --------- - -Dumpling is the most recent major release of Ceph. These packages are -recommended for anyone deploying Ceph in a production environment. -Critical bug fixes are backported and point releases are made as -necessary. - -Add our package repository to your system's list of APT sources. -See `the dumpling Debian repository`_ for a complete list of Debian and Ubuntu releases -supported. :: - - echo deb http://ceph.com/debian-dumpling/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -For the European users there is also a mirror in the Netherlands at http://eu.ceph.com/ :: - - echo deb http://eu.ceph.com/debian-dumpling/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - - - -Cuttlefish ----------- - -Cuttlefish is the previous major release of Ceph. These packages are -recommended for those who have already deployed bobtail in production and are -not yet ready to upgrade. - -Add our package repository to your system's list of APT sources. 
-See `the cuttlefish Debian repository`_ for a complete list of Debian and Ubuntu releases -supported. :: - - echo deb http://ceph.com/debian-cuttlefish/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -For the European users there is also a mirror in the Netherlands at http://eu.ceph.com/ :: - - echo deb http://eu.ceph.com/debian-cuttlefish/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - - -Bobtail -------- - -Bobtail is the second major release of Ceph. These packages are -recommended for those who have already deployed bobtail in production and -are not yet ready to upgrade. - -Add our package repository to your system's list of APT sources. -See `the bobtail Debian repository`_ for a complete list of Debian and Ubuntu releases -supported. :: - - echo deb http://ceph.com/debian-bobtail/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -For the European users there is also a mirror in the Netherlands at http://eu.ceph.com/ :: - - echo deb http://eu.ceph.com/debian-bobtail/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -Argonaut --------- - -Argonaut is the first major release of Ceph. These packages are -recommended for those who have already deployed Argonaut in production -and are not yet ready to upgrade. - -Add our package repository to your system's list of APT sources. See -`the argonaut Debian repository`_ for a complete list of Debian and Ubuntu releases -supported. :: - - echo deb http://ceph.com/debian-argonaut/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -For the European users there is also a mirror in the Netherlands at http://eu.ceph.com/ :: - - echo deb http://eu.ceph.com/debian-argonaut/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - - -Development Release Packages ----------------------------- - -Our development process generates a new release of Ceph every 3-4 -weeks. 
These packages are faster-moving than the stable releases, as -they get new features integrated quickly, while still undergoing -several weeks of QA prior to release. - -Add our package repository to your system's list of APT sources. See -`the testing Debian repository`_ for a complete list of Debian and Ubuntu releases -supported. :: - - echo deb http://ceph.com/debian-testing/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - -For the European users there is also a mirror in the Netherlands at http://eu.ceph.com/ :: - - echo deb http://eu.ceph.com/debian-testing/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - - -Development Testing Packages ----------------------------- - -We automatically build Debian and Ubuntu packages for current -development branches in the Ceph source code repository. These -packages are intended for developers and QA only. - -Packages are cryptographically signed with the ``autobuild.asc`` key. -Add our autobuild key to your system's list of trusted keys to avoid a -security warning:: - - wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc' | sudo apt-key add - - -Add our package repository to your system's list of APT sources, but -replace ``{BRANCH}`` with the branch you'd like to use (e.g., chef-3, -wip-hack, master, stable). See `the gitbuilder page`_ for a complete -list of distributions we build. :: - - echo deb http://gitbuilder.ceph.com/ceph-deb-$(lsb_release -sc)-x86_64-basic/ref/{BRANCH} $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list - - -Installing Packages -=================== - -Once you have added either release or development packages to APT, -you should update APT's database and install Ceph:: - - sudo apt-get update && sudo apt-get install ceph - - -.. _the dumpling Debian repository: http://ceph.com/debian-dumpling/dists -.. _the cuttlefish Debian repository: http://ceph.com/debian-cuttlefish/dists -.. 
_the bobtail Debian repository: http://ceph.com/debian-bobtail/dists -.. _the argonaut Debian repository: http://ceph.com/debian-argonaut/dists -.. _the testing Debian repository: http://ceph.com/debian-testing/dists -.. _the gitbuilder page: http://gitbuilder.ceph.com diff --git a/doc/install/get-packages.rst b/doc/install/get-packages.rst new file mode 100644 index 0000000000000..0854253f3fbe2 --- /dev/null +++ b/doc/install/get-packages.rst @@ -0,0 +1,520 @@ +============== + Get Packages +============== + +To install Ceph and other enabling software, you need to retrieve packages from +the Ceph repository. Follow this guide to get packages; then, proceed to the +`Install Ceph Object Storage`_. + + +Getting Packages +================ + +There are two ways to get packages: + +- **Add Repositories:** Adding repositories is the easiest way to get packages, + because package management tools will retrieve the packages and all enabling + software for you in most cases. However, to use this approach, each + :term:`Ceph Node` in your cluster must have internet access. + +- **Download Packages Manually:** Retrieving packages to install Ceph provides + high security environments that do not allow a :term:`Ceph Node` to access + the internet to download packages. You may also download packages to mirror + the Ceph repository. This approach is not as convenient, but provides a means + of installing Ceph in high security environments (e.g., banking, health care, + etc.). + + +Requirements +============ + +All Ceph deployments require Ceph packages (except for development). You should +also add keys and recommended packages. + +- **Keys: (Recommended)** Whether you add repositories or download packages + manually, you should download keys to verify the packages. If you do not get + the keys, you may encounter security warnings. There are two keys: one for + releases (common) and one for development (programmers and QA only). Choose + the key that suits your needs. 
See `Add Keys`_ for details. + +- **Ceph Extras: (Required)** The Ceph Extras repository provides newer + Ceph-enabled versions of packages which are already provided in your Linux + distribution, but where newer versions are required to support Ceph. Examples + of newer versions of available packages include QEMU for CentOS/RHEL + distribution and iSCSI among others. If you intend to use any of the + foregoing packages, you must add the Ceph Extras repository or download the + packages manually. This repository also contains Ceph dependencies for those + who intend to install Ceph manually. See `Add Ceph Extras`_ for details. + +- **Ceph: (Required)** All Ceph deployments require Ceph release packages, + except for deployments that use development packages (development, QA, and + bleeding edge deployments only). See `Add Ceph`_ for details. + +- **Ceph Development: (Optional)** If you are developing for Ceph, testing Ceph + development builds, or if you want features from the bleeding edge of Ceph + development, you may get Ceph development packages. See + `Add Ceph Development`_ for details. + +- **Apache/FastCGI: (Optional)** If you are deploying a + :term:`Ceph Object Storage` service, you must install Apache and FastCGI. + Ceph provides Apache and FastCGI builds that are identical to those available + from Apache, but with 100-continue support. If you want to enable + :term:`Ceph Object Gateway` daemons with 100-continue support, you must + retrieve Apache/FastCGI packages from the Ceph repository. + See `Add Apache/FastCGI`_ for details. + + +If you intend to download packages manually, see Section `Download Packages`_. + + +Add Keys +======== + +Add a key to your system's list of trusted keys to avoid a security warning. For +major releases (e.g., ``cuttlefish``, ``emperor``) and development releases +(``release-name-rc1``, ``release-name-rc2``), use the ``release.asc`` key. 
For +development testing packages, use the ``autobuild.asc`` key (developers and QA). + + +APT +--- + +To install the ``release.asc`` key, execute the following:: + + wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add - + + +To install the ``autobuild.asc`` key, execute the following +(QA and developers only):: + + wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc' | sudo apt-key add - + + +RPM +--- + +To install the ``release.asc`` key, execute the following:: + + sudo rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' + +To install the ``autobuild.asc`` key, execute the following +(QA and developers only):: + + sudo rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc' + + + +Add Ceph Extras +=============== + +Some Ceph deployments require newer Ceph-enabled versions of packages that are +already available in your Linux distribution. For example, Ceph Extras contains +newer Ceph-enabled packages for the SCSI target framework and QEMU packages for +RPMs. The repository also contains ``curl``, ``leveldb`` and other Ceph +dependencies. Add the Ceph Extras repository to ensure you obtain these +additional packages from the Ceph repository. + + +Debian Packages +--------------- + +Add our Ceph Extras package repository to your system's list of APT sources. :: + + echo deb http://ceph.com/packages/ceph-extras/debian $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph-extras.list + + +RPM Packages +------------ + +For RPM packages, add our package repository to your ``/etc/yum.repos.d`` repos (e.g., +``ceph-extras.repo``). Some Ceph packages (e.g., QEMU) must take priority over standard +packages, so you must ensure that you set ``priority=2``. 
:: + + [ceph-extras] + name=Ceph Extras Packages + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/$basearch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-extras-noarch] + name=Ceph Extras noarch + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/noarch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-extras-source] + name=Ceph Extras Sources + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/SRPMS + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + +Add Ceph +======== + +Release repositories use the ``release.asc`` key to verify packages. +To install Ceph packages with the Advanced Package Tool (APT) or +Yellowdog Updater, Modified (YUM), you must add Ceph repositories. + +You may find releases for Debian/Ubuntu (installed with APT) at:: + + http://ceph.com/debian-{release-name} + +You may find releases for CentOS/RHEL and others (installed with YUM) at:: + + http://ceph.com/rpm-{release-name} + +The major releases of Ceph include: + +- **Emperor:** Emperor is the most recent major release of Ceph. These packages + are recommended for anyone deploying Ceph in a production environment. + Critical bug fixes are backported and point releases are made as necessary. + +- **Dumpling:** Dumpling is the fourth major release of Ceph. These packages + are recommended for anyone deploying Ceph in a production environment. + Critical bug fixes are backported as necessary. + +- **Cuttlefish:** Cuttlefish is the third major release of Ceph. These packages + are recommended for those who have already deployed bobtail in production and + are not yet ready to upgrade. + +- **Bobtail:** Bobtail is the second major release of Ceph. 
These packages are + recommended for those who have already deployed bobtail in production and + are not yet ready to upgrade. + +- **Argonaut:** Argonaut is the first major release of Ceph. These packages + are recommended for those who have already deployed Argonaut in production + and are not yet ready to upgrade. + +.. tip:: For European users, there is also a mirror in the Netherlands at: + http://eu.ceph.com/ + + +Debian Packages +--------------- + +Add a Ceph package repository to your system's list of APT sources. For newer +versions of Debian/Ubuntu, call ``lsb_release -sc`` on the command line to +get the short codename, and replace ``{codename}`` in the following command. :: + + sudo apt-add-repository 'deb http://ceph.com/debian-emperor/ {codename} main' + +For early Linux distributions, you may execute the following command:: + + echo deb http://ceph.com/debian-emperor/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list + +For earlier Ceph releases, replace ``{release-name}`` with the name with the +name of the Ceph release. You may call ``lsb_release -sc`` on the command line +to get the short codename, and replace ``{codename}`` in the following command. +:: + + sudo apt-add-repository 'deb http://ceph.com/debian-{release-name}/ {codename} main' + +For older Linux distributions, replace ``{release-name}`` with the name of the +release:: + + echo deb http://ceph.com/debian-{release-name}/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list + +Ceph on ARM processors requires Google's memory profiling tools (``google-perftools``). +The Ceph repository should have a copy at +http://ceph.com/packages/google-perftools/debian. :: + + echo deb http://ceph.com/packages/google-perftools/debian $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/google-perftools.list + + +For development release packages, add our package repository to your system's +list of APT sources. 
See `the testing Debian repository`_ for a complete list +of Debian and Ubuntu releases supported. :: + + echo deb http://ceph.com/debian-testing/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list + + +RPM Packages +------------ + +For major releases, you may add a Ceph entry to the ``/etc/yum.repos.d`` +directory. Create a ``ceph.repo`` file. In the example below, replace +``{ceph-release}`` with a major release of Ceph (e.g., ``dumpling``, +``emperor``, etc.) and ``{distro}`` with your Linux distribution (e.g., ``el6``, +``rhel6``, etc.). You may view http://ceph.com/rpm-{ceph-release}/ directory to +see which distributions Ceph supports. :: + + [ceph] + name=Ceph packages for $basearch + baseurl=http://ceph.com/rpm-{ceph-release}/{distro}/$basearch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-noarch] + name=Ceph noarch packages + baseurl=http://ceph.com/rpm-{ceph-release}/{distro}/noarch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-source] + name=Ceph source packages + baseurl=http://ceph.com/rpm-{ceph-release}/{distro}/SRPMS + enabled=0 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + +For development release packages, you may specify the repository +for development releases instead. 
:: + + [ceph] + name=Ceph packages for $basearch/$releasever + baseurl=http://ceph.com/rpm-testing/{distro}/$basearch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-noarch] + name=Ceph noarch packages + baseurl=http://ceph.com/rpm-testing/{distro}/noarch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-source] + name=Ceph source packages + baseurl=http://ceph.com/rpm-testing/{distro}/SRPMS + enabled=0 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + +For specific packages, you may retrieve them by specifically downloading the +release package by name. Our development process generates a new release of Ceph +every 3-4 weeks. These packages are faster-moving than the major releases. +Development packages have new features integrated quickly, while still +undergoing several weeks of QA prior to release. + +The repository package installs the repository details on your local system for +use with ``yum`` or ``up2date``. Replace ``{distro}`` with your Linux distribution, +and ``{release}`` with the specific release of Ceph:: + + su -c 'rpm -Uvh http://ceph.com/rpms/{distro}/x86_64/ceph-{release}.el6.noarch.rpm' + +You can download the RPMs directly from:: + + http://ceph.com/rpm-testing + + +Add Ceph Development +==================== + +Development repositories use the ``autobuild.asc`` key to verify packages. +If you are developing Ceph and need to deploy and test specific Ceph branches, +ensure that you remove repository entries for major releases first. + + +Debian Packages +--------------- + +We automatically build Debian and Ubuntu packages for current +development branches in the Ceph source code repository. These +packages are intended for developers and QA only. 
+ +Add our package repository to your system's list of APT sources, but +replace ``{BRANCH}`` with the branch you'd like to use (e.g., chef-3, +wip-hack, master). See `the gitbuilder page`_ for a complete +list of distributions we build. :: + + echo deb http://gitbuilder.ceph.com/ceph-deb-$(lsb_release -sc)-x86_64-basic/ref/{BRANCH} $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list + + +RPM Packages +------------ + +For current development branches, you may add a Ceph entry to the +``/etc/yum.repos.d`` directory. Create a ``ceph.repo`` file. In the example +below, replace ``{distro}`` with your Linux distribution (e.g., ``centos6``, +``rhel6``, etc.), and ``{branch}`` with the name of the branch you want to +install. :: + + + [ceph-source] + name=Ceph source packages + baseurl=http://gitbuilder.ceph.com/ceph-rpm-{distro}-x86_64-basic/ref/{branch}/SRPMS + enabled=0 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + +You may view http://gitbuilder.ceph.com directory to see which distributions +Ceph supports. + + +Add Apache/FastCGI +================== + +Ceph Object Gateway works with ordinary Apache and FastCGI libraries. However, +Ceph builds Apache and FastCGI packages that support 100-continue. To use the +Ceph Apache and FastCGI packages, add them to your repository. + + +Debian Packages +--------------- + +Add our Apache and FastCGI packages to your system's list of APT sources if you intend to +use 100-continue. :: + + echo deb http://gitbuilder.ceph.com/apache2-deb-$(lsb_release -sc)-x86_64-basic/ref/master $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph-apache.list + echo deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-$(lsb_release -sc)-x86_64-basic/ref/master $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph-fastcgi.list + + +RPM Packages +------------ + +You may add a Ceph entry to the ``/etc/yum.repos.d`` directory. 
Create a +``ceph-apache.repo`` file. In the example below, replace ``{distro}`` with your +Linux distribution (e.g., ``el6``, ``rhel6``, etc.). You may view +http://gitbuilder.ceph.com directory to see which distributions Ceph supports. +:: + + + [apache2-ceph-noarch] + name=Apache noarch packages for Ceph + baseurl=http://gitbuilder.ceph.com/apache2-rpm-{distro}-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [apache2-ceph-source] + name=Apache source packages for Ceph + baseurl=http://gitbuilder.ceph.com/apache2-rpm-{distro}-x86_64-basic/ref/master + enabled=0 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + +Repeat the forgoing process by creating a ``ceph-fastcgi.repo`` file. :: + + [fastcgi-ceph-basearch] + name=FastCGI basearch packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-{distro}-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [fastcgi-ceph-noarch] + name=FastCGI noarch packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-{distro}-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [fastcgi-ceph-source] + name=FastCGI source packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-{distro}-x86_64-basic/ref/master + enabled=0 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + +Download Packages +================= + +If you are attempting to install behind a firewall in an environment without internet +access, you must retrieve the packages (mirrored with all the necessary dependencies) +before attempting an install. 
+ +Debian Packages +--------------- + +Ceph requires additional additional third party libraries. + +- libaio1 +- libsnappy1 +- libcurl3 +- curl +- libgoogle-perftools4 +- google-perftools +- libleveldb1 + + +The repository package installs the repository details on your local system for +use with ``apt``. Replace ``{release}`` with the latest Ceph release. Replace +``{version}`` with the latest Ceph version number. Replace ``{distro}`` with +your Linux distribution codename. Replace ``{arch}`` with the CPU architecture. + +:: + + wget -q http://ceph.com/debian-{release}/pool/main/c/ceph/ceph_{version}{distro}_{arch}.deb + + +RPM Packages +------------ + +Ceph requires additional additional third party libraries. +To add the EPEL repository, execute the following:: + + su -c 'rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm' + +Ceph requires the following packages: + +- snappy +- leveldb +- gdisk +- python-argparse +- gperftools-libs + + +Packages are currently built for the RHEL/CentOS6 (``el6``), Fedora 18 and 19 +(``f18`` and ``f19``), OpenSUSE 12.2 (``opensuse12.2``), and SLES (``sles11``) +platforms. The repository package installs the repository details on your local +system for use with ``yum`` or ``up2date``. Replace ``{distro}`` with your +distribution. :: + + su -c 'rpm -Uvh http://ceph.com/rpm-emperor/{distro}/noarch/ceph-{version}.{distro}.noarch.rpm' + +For example, for CentOS 6 (``el6``):: + + su -c 'rpm -Uvh http://ceph.com/rpm-emperor/el6/noarch/ceph-release-1-0.el6.noarch.rpm' + +You can download the RPMs directly from:: + + http://ceph.com/rpm-emperor + + +For earlier Ceph releases, replace ``{release-name}`` with the name +with the name of the Ceph release. You may call ``lsb_release -sc`` on the command +line to get the short codename. :: + + su -c 'rpm -Uvh http://ceph.com/rpm-{release-name}/{distro}/noarch/ceph-{version}.{distro}.noarch.rpm' + + + + +.. 
_Install Ceph Object Storage: ../install-storage-cluster +.. _the testing Debian repository: http://ceph.com/debian-testing/dists +.. _the gitbuilder page: http://gitbuilder.ceph.com \ No newline at end of file diff --git a/doc/install/git.rst b/doc/install/git.rst deleted file mode 100644 index 2b56007941756..0000000000000 --- a/doc/install/git.rst +++ /dev/null @@ -1,50 +0,0 @@ -============ - Set Up Git -============ - -To clone the Ceph git repository, you must have ``git`` installed -on your local host. - -Install Git -=========== - -To install ``git``, execute:: - - sudo apt-get install git - -You must also have a ``github`` account. If you do not have a -``github`` account, go to `github.com`_ and register. -Follow the directions for setting up git at -`Set Up Git`_. - -.. _github.com: http://github.com -.. _Set Up Git: http://help.github.com/linux-set-up-git - -Generate SSH Keys -================= - -If you intend to commit code to Ceph or to clone using SSH (``git@github.com:ceph/ceph.git``), you must generate SSH keys for github. - -.. tip:: If you only intend to clone the repository, you may - use ``git clone --recursive https://github.com/ceph/ceph.git`` - without generating SSH keys. - -To generate SSH keys for ``github``, execute:: - - ssh-keygen - -Get the key to add to your ``github`` account (the following example -assumes you used the default file path):: - - cat .ssh/id_rsa.pub - -Copy the public key. - -Add the Key -=========== - -Go to your your ``github`` account, click on "Account Settings" (i.e., the -'tools' icon); then, click "SSH Keys" on the left side navbar. - -Click "Add SSH key" in the "SSH Keys" list, enter a name for the key, paste the -key you generated, and press the "Add key" button. 
\ No newline at end of file diff --git a/doc/install/index.rst b/doc/install/index.rst index 3be09c5d0dfa4..b5cc90b12c455 100644 --- a/doc/install/index.rst +++ b/doc/install/index.rst @@ -2,75 +2,56 @@ Installation (Manual) ======================= -.. raw:: html -

Advanced Package Tool (APT)

+Get Software +============ -If you are deploying a Ceph cluster on Debian or Ubuntu distributions, -use the instructions below to install packages manually. +There are several methods for getting Ceph software. The easiest and most common +method is to `get packages`_ by adding repositories for use with package +management tools such as the Advanced Package Tool (APT) or Yellowdog Updater, +Modified (YUM). You may also retrieve pre-compiled packages from the Ceph +repository. Finally, you can retrieve tarballs or clone the Ceph source code +repository and build Ceph yourself. -.. toctree:: - :maxdepth: 2 - - Installing Debian/Ubuntu Packages - Installing on Calxeda Hardware - Installing QEMU - Installing libvirt - -.. raw:: html - -

Redhat Package Manager (RPM) / Yellowdog Updater, Modified (YUM)

- -If you are deploying a Ceph cluster on Red Hat(rhel6), CentOS (el6), Fedora -17-19 (f17-f19), OpenSUSE 12 (opensuse12), and SLES (sles11) distributions, use -the instructions below to install packages manually. .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - Installing RPM Packages - Installing YUM Priorities - Installing QEMU - Installing libvirt + Get Packages + Get Tarballs + Clone Source + Build Ceph -.. raw:: html -

Upgrading Ceph

- -If you are upgrading Ceph from a previous release, please read the the upgrade -documentation to ensure that you follow the proper upgrade sequence. +Install Software +================ -.. toctree:: - :maxdepth: 2 +Once you have the Ceph software (or added repositories), installing the software +is easy. To install packages on each :term:`Ceph Node` in your cluster. You may +use ``ceph-deploy`` to install Ceph for your storage cluster, or use package +management tools. You should install Yum Priorities for RHEL/CentOS and other +distributions that use Yum if you intend to install the Ceph Object Gateway or +QEMU. - Upgrading Ceph - +.. toctree:: + :maxdepth: 1 -.. raw:: html + Install ceph-deploy + Install Ceph Storage Cluster + Install Ceph Object Gateway + Install Virtualization for Block -

Building Ceph

+Upgrade Software +================ -You can build Ceph from source by downloading a release or cloning the ``ceph`` -repository at github. If you intend to build Ceph from source, please see the -build pre-requisites first. Making sure you have all the pre-requisites -will save you time. +As new versions of Ceph become available, you may upgrade your cluster to take +advantage of new functionality. Read the upgrade documentation before you +upgrade your cluster. Sometimes upgrading Ceph requires you to follow an upgrade +sequence. .. toctree:: - :maxdepth: 1 - - Prerequisites - Get a Tarball - Set Up Git - Clone the Source - Build the Source - Install CPU Profiler - Build a Package - Contributing Code - -See the `Development`_ section for additional development details. - -.. raw:: html + :maxdepth: 2 -
- -.. _Development: ../../dev \ No newline at end of file + Upgrading Ceph + +.. _get packages: ../install/get-packages \ No newline at end of file diff --git a/doc/install/install-ceph-deploy.rst b/doc/install/install-ceph-deploy.rst new file mode 100644 index 0000000000000..cb2eb34f968a2 --- /dev/null +++ b/doc/install/install-ceph-deploy.rst @@ -0,0 +1,23 @@ +===================== + Install Ceph Deploy +===================== + +The ``ceph-deploy`` tool is a enables you to set up and tear down Ceph clusters +for development, testing and proof-of-concept projects. + + +APT +--- + +To install ``ceph-deploy`` with ``apt``, execute the following:: + + sudo apt-get update && sudo apt-get install ceph-deploy + + +RPM +--- + +To install ``ceph-deploy`` with ``yum``, execute the following:: + + sudo yum install ceph-deploy + diff --git a/doc/install/install-ceph-gateway.rst b/doc/install/install-ceph-gateway.rst new file mode 100644 index 0000000000000..047885f73ed0c --- /dev/null +++ b/doc/install/install-ceph-gateway.rst @@ -0,0 +1,357 @@ +============================= + Install Ceph Object Gateway +============================= + +The :term:`Ceph Object Gateway` daemon runs on Apache and FastCGI. + +To run a :term:`Ceph Object Storage` service, you must install Apache and +FastCGI. Then, you must install the Ceph Object Gateway daemon. The Ceph Object +Gateway supports 100-continue, but you must install Ceph builds of Apache and +FastCGI for 100-continue support. To install the Ceph Object Gateway, first +install and configure Apache and FastCGI. Then, install the Ceph Object Gateway +daemon. If you plan to run a Ceph Object Storage service with a federated +architecture (multiple regions and zones), you must also install the +synchronization agent. + + +Apache/FastCGI w/out 100-Continue +================================= + +You may use standard Apache and FastCGI packages for your Ceph Object +Gateways. However, they will not provide 100-continue support. 
+ +Debian Packages +--------------- + +To install Apache and FastCGI Debian packages, execute the following:: + + sudo apt-get install apache2 libapache2-mod-fastcgi + + +RPM Packages +------------ + +To install Apache and FastCGI RPMs, execute the following:: + + rpm -ivh fcgi-2.4.0-10.el6.x86_64.rpm + rpm -ivh mod_fastcgi-2.4.6-2.el6.rf.x86_64.rpm + +Or:: + + sudo yum install httpd mod_fastcgi + + +Apache/FastCGI w/ 100-Continue +============================== + +The Ceph community provides a slightly optimized version of the ``apache2`` +and ``fastcgi`` packages. The material difference is that the Ceph packages are +optimized for the ``100-continue`` HTTP response, where the server determines +if it will accept the request by first evaluating the request header. See `RFC +2616, Section 8`_ for details on ``100-continue``. You can find the most recent +builds of Apache and FastCGI packages modified for Ceph at `gitbuilder.ceph.com`_. + + +Debian Packages +--------------- + +#. Add a ``ceph-apache.list`` file to your APT sources. :: + + echo deb http://gitbuilder.ceph.com/apache2-deb-$(lsb_release -sc)-x86_64-basic/ref/master $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph-apache.list + +#. Add a ``ceph-fastcgi.list`` file to your APT sources. :: + + echo deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-$(lsb_release -sc)-x86_64-basic/ref/master $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph-fastcgi.list + +#. Update your repository and install Apache and FastCGI:: + + sudo apt-get update && sudo apt-get install apache2 libapache2-mod-fastcgi + + +RPM Packages +------------ + +To install Apache with 100-continue, execute the following steps: + +#. Install ``yum-plugin-priorities``. :: + + sudo yum install yum-plugin-priorities + +#. Ensure ``/etc/yum/pluginconf.d/priorities.conf`` exists. + +#. Ensure ``priorities.conf`` enables the plugin. :: + + [main] + enabled = 1 + +#. 
Add a ``ceph-apache.repo`` file to ``/etc/yum.repos.d``. Replace + ``{distro}`` with the name of your distribution (e.g., ``centos6``, + ``rhel6``, etc.) :: + + [apache2-ceph-noarch] + name=Apache noarch packages for Ceph + baseurl=http://gitbuilder.ceph.com/apache2-rpm-{distro}-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [apache2-ceph-source] + name=Apache source packages for Ceph + baseurl=http://gitbuilder.ceph.com/apache2-rpm-{distro}-x86_64-basic/ref/master + enabled=0 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + +#. Add a ``ceph-fastcgi.repo`` file to ``/etc/yum.repos.d``. Replace + ``{distro}`` with the name of your distribution (e.g., ``centos6``, + ``rhel6``, etc.) :: + + [fastcgi-ceph-basearch] + name=FastCGI basearch packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-centos6-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [fastcgi-ceph-noarch] + name=FastCGI noarch packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-centos6-x86_64-basic/ref/master + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + [fastcgi-ceph-source] + name=FastCGI source packages for Ceph + baseurl=http://gitbuilder.ceph.com/mod_fastcgi-rpm-centos6-x86_64-basic/ref/master + enabled=0 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc + + +#. Update your repo and install Apache and FastCGI. :: + + sudo yum update && sudo yum install httpd mod_fastcgi + + +Configure Apache/FastCGI +======================== + +To complete the installation, ensure that you have the rewrite module +enabled and FastCGI enabled. 
The steps differ slightly based upon the +type of package installation. + +Debian-based Packages +--------------------- + +#. Open the ``apache2.conf`` file. :: + + sudo vim /etc/apache2/apache2.conf + + +#. Add a line for the ``ServerName`` in the Apache configuration file. + Provide the fully qualified domain name of the server machine + (e.g., ``hostname -f``). :: + + ServerName {fqdn} + +#. Enable the URL rewrite modules for Apache and FastCGI. :: + + sudo a2enmod rewrite + sudo a2enmod fastcgi + + +#. Restart Apache so that the foregoing changes take effect. :: + + sudo service apache2 restart + + +RPM-based Packages +------------------ + + +#. Open the ``httpd.conf`` file. :: + + sudo vim /etc/httpd/conf/httpd.conf + +#. Uncomment ``#ServerName`` and add the name of your server. + Provide the fully qualified domain name of the server machine + (e.g., ``hostname -f``).:: + + ServerName {fgdn} + +#. Ensure that the Rewrite module is enabled. :: + + #if not present, add: + LoadModule rewrite_module modules/mod_rewrite.so + +#. Save the ``httpd.conf`` file. + +#. Ensure that the FastCGI module is enabled. The installer should + include an ``/etc/httpd/conf.d/fastcgi.conf`` file that loads the + FastCGI module. :: + + #if not present, add: + LoadModule fastcgi_module modules/mod_fastcgi.so + +#. Restart Apache so that the foregoing changes take effect.. :: + + etc/init.d/httpd restart + +.. _RFC 2616, Section 8: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html +.. _gitbuilder.ceph.com: http://gitbuilder.ceph.com +.. _Installing YUM Priorities: ../yum-priorities + + +Enable SSL +========== + +Some REST clients use HTTPS by default. So you should consider enabling SSL +for Apache. Use the following procedures to enable SSL. + +.. note:: You can use self-certified certificates. Some client + APIs check for a trusted certificate authority. You may need to obtain + a SSL certificate from a trusted authority to use those client APIs. 
+ + +Debian Packages +--------------- + +To enable SSL for Debian/Ubuntu systems, execute the following steps: + +#. Ensure that you have installed the dependencies. :: + + sudo apt-get install openssl ssl-cert + +#. Enable the SSL module. :: + + sudo a2enmod ssl + +#. Generate a certificate. :: + + sudo mkdir /etc/apache2/ssl + sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/apache2/ssl/apache.key -out /etc/apache2/ssl/apache.crt + +#. Restart Apache. :: + + service apache2 restart + + +See the `Ubuntu Server Guide`_ for additional details. + + +RPM Packages +------------ + +To enable SSL for RPM-based systems, execute the following steps: + +#. Ensure that you have installed the dependencies. :: + + sudo yum install mod_ssl openssl + +#. Ensure the SSL module is enabled. + +#. Generate a certificate and copy it the appropriate locations. :: + + openssl x509 -req -days 365 -in ca.csr -signkey ca.key -out ca.crt + cp ca.crt /etc/pki/tls/certs + cp ca.key /etc/pki/tls/private/ca.key + cp ca.csr /etc/pki/tls/private/ca.csr + +#. Restart Apache. :: + + /etc/init.d/httpd restart + +See `Setting up an SSL secured Webserver with CentOS`_ for additional details. + + + +Add Wildcard to DNS +=================== + +To use Ceph with S3-style subdomains (e.g., ``bucket-name.domain-name.com``), +you need to add a wildcard to the DNS record of the DNS server you use with the +``radosgw`` daemon. + +.. tip:: The address of the DNS must also be specified in the Ceph + configuration file with the ``rgw dns name = {hostname}`` setting. + +For ``dnsmasq``, consider addding the following ``address`` setting with a dot +(.) prepended to the host name:: + + address=/.{hostname-or-fqdn}/{host-ip-address} + address=/.ceph-node/192.168.0.1 + +For ``bind``, consider adding the a wildcard to the DNS record:: + + $TTL 604800 + @ IN SOA ceph-node. root.ceph-node. 
( + 2 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 604800 ) ; Negative Cache TTL + ; + @ IN NS ceph-node. + @ IN A 192.168.122.113 + * IN CNAME @ + +Restart your DNS server and ping your server with a subdomain to +ensure that your Ceph Object Store ``radosgw`` daemon can process +the subdomain requests. :: + + ping mybucket.{fqdn} + ping mybucket.ceph-node + + +Install Ceph Object Gateway +=========================== + +Ceph Object Storage services use the Ceph Object Gateway daemon (``radosgw``) +to enable the gateway. For federated architectures, the synchronization +agent (``radosgw-agent``) provides data and metadata synchronization between +zones and regions. + + +Debian Packages +--------------- + +To install the Ceph Object Gateway daemon, execute the +following:: + + sudo apt-get install radosgw + + +To install the Ceph Object Gateway synchronization agent, execute the +following:: + + sudo apt-get radosgw-agent + + +RPM Packages +------------ + +To install the Ceph Object Gateway daemon, execute the +following:: + + yum install ceph-radosgw + + +To install the Ceph Object Gateway synchronization agent, execute the +following:: + + yum install radosgw-agent + + +.. _Ubuntu Server Guide: https://help.ubuntu.com/12.04/serverguide/httpd.html +.. _Setting up an SSL secured Webserver with CentOS: http://wiki.centos.org/HowTos/Https diff --git a/doc/install/install-storage-cluster.rst b/doc/install/install-storage-cluster.rst new file mode 100644 index 0000000000000..873e3f4594a88 --- /dev/null +++ b/doc/install/install-storage-cluster.rst @@ -0,0 +1,45 @@ +============================== + Install Ceph Storage Cluster +============================== + +This guide describes installing Ceph packages manually. This procedure +is only for users who are not installing with a deployment tool such as +``ceph-deploy``, ``chef``, ``juju``, etc. + +.. 
tip:: You can also use ``ceph-deploy`` to install Ceph packages, which may + be more convenient since you can install ``ceph`` on multiple hosts with + a single command. + + +Installing with APT +=================== + +Once you have added either release or development packages to APT, you should +update APT's database and install Ceph:: + + sudo apt-get update && sudo apt-get install ceph ceph-mds + + +Installing with RPM +=================== + +To install pre-requisite packages, execute the following:: + + sudo yum install snappy leveldb gdisk python-argparse gperftools-libs + +Once you have added either release or development packages, or added a +``ceph.repo`` file to ``/etc/yum.repos.d``, you can install Ceph packages. :: + + sudo yum install ceph + +Installing a Build +================== + +If you build Ceph from source code, you may install Ceph in user space +by executing the following:: + + sudo make install + +If you install Ceph locally, ``make`` will place the executables in +``usr/local/bin``. You may add the Ceph configuration file to the +``usr/local/bin`` directory to run Ceph from a single directory. \ No newline at end of file diff --git a/doc/install/install-vm-cloud.rst b/doc/install/install-vm-cloud.rst new file mode 100644 index 0000000000000..1c54814ddc0bd --- /dev/null +++ b/doc/install/install-vm-cloud.rst @@ -0,0 +1,169 @@ +========================================= + Install Virtualization for Block Device +========================================= + +If you intend to use Ceph Block Devices and the Ceph Storage Cluster as a +backend for Virtual Machines (VMs) or :term:`Cloud Platforms` the QEMU/KVM and +``libvirt`` packages are important for enabling VMs and cloud platforms. +Examples of VMs include: QEMU/KVM, XEN, VMWare, LXC, VirtualBox, etc. Examples +of Cloud Platforms include OpenStack, CloudStack, OpenNebula, etc. + + +.. 
ditaa:: +---------------------------------------------------+ + | libvirt | + +------------------------+--------------------------+ + | + | configures + v + +---------------------------------------------------+ + | QEMU | + +---------------------------------------------------+ + | librbd | + +------------------------+-+------------------------+ + | OSDs | | Monitors | + +------------------------+ +------------------------+ + + +Install QEMU +============ + +QEMU KVM can interact with Ceph Block Devices via ``librbd``, which is an +important feature for using Ceph with cloud platforms. Once you install QEMU, +see `QEMU and Block Devices`_ for usage. + + +Debian Packages +--------------- + +QEMU packages are incorporated into Ubuntu 12.04 Precise Pangolin and later +versions. To install QEMU, execute the following:: + + sudo apt-get install qemu + + +RPM Packages +------------ + +To install QEMU, execute the following: + +#. Install ``yum-plugin-priorities``. :: + + sudo yum install yum-plugin-priorities + +#. Ensure ``/etc/yum/pluginconf.d/priorities.conf`` exists. + +#. Ensure ``priorities.conf`` enables the plugin. :: + + [main] + enabled = 1 + +#. Create a ``/etc/yum.repos.d/ceph-qemu.conf`` file with the following + contents, and replace ``{distro}`` with your Linux distribution. 
Follow + the ``baseurl`` path below to see which distributions Ceph supports:: + + [ceph-qemu] + name=Ceph Packages for QEMU + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/$basearch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-qemu-noarch] + name=Ceph QEMU noarch + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/noarch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-qemu-source] + name=Ceph QEMU Sources + baseurl=http://ceph.com/packages/ceph-extras/rpm/{distro}/SRPMS + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + +#. Update your repositories. :: + + sudo yum update + +#. Install QEMU for Ceph. :: + + sudo yum install qemu-kvm qemu-kvm-tools qemu-img + +#. Install additional QEMU packages (optional):: + + sudo yum install qemu-guest-agent qemu-guest-agent-win32 + + +Building QEMU +------------- + +To build QEMU from source, use the following procedure:: + + cd {your-development-directory} + git clone git://git.qemu.org/qemu.git + cd qemu + ./configure --enable-rbd + make; make install + + + +Install libvirt +=============== + +To use ``libvirt`` with Ceph, you must have a running Ceph Storage Cluster, and +you must have installed and configured QEMU. See `Using libvirt with Ceph Block +Device`_ for usage. + + +Debian Packages +--------------- + +``libvirt`` packages are incorporated into Ubuntu 12.04 Precise Pangolin and +later versions of Ubuntu. To install ``libvirt`` on these distributions, +execute the following:: + + sudo apt-get update && sudo apt-get install libvirt-bin + + +RPM Packages +------------ + +To use ``libvirt`` with a Ceph Storage Cluster, you must have a running Ceph +Storage Cluster and you must also install a version of QEMU with ``rbd`` format +support. 
See `Install QEMU`_ for details. + + +``libvirt`` packages are incorporated into the recent CentOS/RHEL distributions. +To install ``libvirt``, execute the following:: + + sudo yum install libvirt + + +Building ``libvirt`` +-------------------- + +To build ``libvirt`` from source, clone the ``libvirt`` repository and use +`AutoGen`_ to generate the build. Then, execute ``make`` and ``make install`` to +complete the installation. For example:: + + git clone git://libvirt.org/libvirt.git + cd libvirt + ./autogen.sh + make + sudo make install + +See `libvirt Installation`_ for details. + + + +.. _libvirt Installation: http://www.libvirt.org/compiling.html +.. _AutoGen: http://www.gnu.org/software/autogen/ +.. _QEMU and Block Devices: ../../rbd/qemu-rbd +.. _Using libvirt with Ceph Block Device: ../../rbd/libvirt \ No newline at end of file diff --git a/doc/install/libvirt-deb.rst b/doc/install/libvirt-deb.rst deleted file mode 100644 index 9365e46c747e3..0000000000000 --- a/doc/install/libvirt-deb.rst +++ /dev/null @@ -1,43 +0,0 @@ -==================== - Installing libvirt -==================== - - -Prerequisites -============= - -- `Install`_ and `configure`_ a Ceph Storage Cluster -- `Install and configure`_ QEMU/KVM - - -Installing ``libvirt`` on Ubuntu 12.04 Precise -============================================== - -``libvirt`` packages are incorporated into the Ubuntu 12.04 precise -distribution. To install ``libvirt`` on precise, execute the following:: - - sudo apt-get update && sudo apt-get install libvirt-bin - - -Installing ``libvirt`` on Earlier Versions of Ubuntu -==================================================== - -For Ubuntu distributions 11.10 oneiric and earlier, you must build ``libvirt`` -from source. Clone the ``libvirt`` repository, and use `AutoGen`_ to generate -the build. Then, execute ``make`` and ``make install`` to complete the -installation. 
For example:: - - git clone git://libvirt.org/libvirt.git - cd libvirt - ./autogen.sh - make - sudo make install - -See `libvirt Installation`_ for details. - - -.. _libvirt Installation: http://www.libvirt.org/compiling.html -.. _AutoGen: http://www.gnu.org/software/autogen/ -.. _Install: ../index -.. _configure: ../../rados/configuration -.. _Install and configure: ../../rbd/qemu-rbd diff --git a/doc/install/libvirt-rpm.rst b/doc/install/libvirt-rpm.rst deleted file mode 100644 index a94c6e8ae1270..0000000000000 --- a/doc/install/libvirt-rpm.rst +++ /dev/null @@ -1,19 +0,0 @@ -==================== - Installing libvirt -==================== - -To use ``libvirt`` with a Ceph Storage Cluster, you must -have a running Ceph Storage Cluster. You must also install QEMU. -See `Installing QEMU`_ for details. - - -``libvirt`` packages are incorporated into the recent CentOS/RHEL distributions. -To install ``libvirt``, execute the following:: - - sudo yum install libvirt - -See `libvirt Installation`_ for details. - - -.. _libvirt Installation: http://www.libvirt.org/compiling.html -.. _Installing QEMU: ../qemu-rpm \ No newline at end of file diff --git a/doc/install/qemu-deb.rst b/doc/install/qemu-deb.rst deleted file mode 100644 index 29abeafa3bc3d..0000000000000 --- a/doc/install/qemu-deb.rst +++ /dev/null @@ -1,26 +0,0 @@ -================= - Installing QEMU -================= - - - -Installing QEMU (12.04 Precise and later) -========================================= - -QEMU packages are incorporated into Ubuntu 12.04 Precise Pangolin and later -versions. To install QEMU, execute the following:: - - sudo apt-get install qemu - -Installing QEMU (11.10 Oneric and earlier) -========================================== - -For Ubuntu distributions 11.10 Oneiric and earlier, you must install -the 0.15 version of QEMU or later. 
To build QEMU from source, use the -following procedure:: - - cd {your-development-directory} - git clone git://git.qemu.org/qemu.git - cd qemu - ./configure --enable-rbd - make; make install diff --git a/doc/install/qemu-rpm.rst b/doc/install/qemu-rpm.rst deleted file mode 100644 index 67da2c3714c35..0000000000000 --- a/doc/install/qemu-rpm.rst +++ /dev/null @@ -1,56 +0,0 @@ -================= - Installing QEMU -================= - -To install QEMU with ``yum``, you must ensure that you have -``yum-plugin-priorities`` installed. See `Installing YUM Priorities`_ -for details. - -To install QEMU, execute the following: - -#. Create a ``/etc/yum.repos.d/ceph-qemu.conf`` file with the following - contents:: - - [ceph-qemu] - name=Ceph Packages for QEMU - baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/$basearch - enabled=1 - priority=2 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - - [ceph-qemu-noarch] - name=Ceph QEMU noarch - baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/noarch - enabled=1 - priority=2 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - - [ceph-qemu-source] - name=Ceph QEMU Sources - baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/SRPMS - enabled=1 - priority=2 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - -#. Update your repositories. :: - - sudo yum update - -#. Install QEMU for Ceph. :: - - sudo yum install qemu-kvm qemu-kvm-tools qemu-img - -#. Install additional QEMU packages (optional):: - - sudo yum install qemu-guest-agent qemu-guest-agent-win32 - -See `QEMU and Block Devices`_ for usage. - -.. _QEMU and Block Devices: ../../rbd/qemu-rbd -.. 
_Installing YUM Priorities: ../yum-priorities \ No newline at end of file diff --git a/doc/install/rpm.rst b/doc/install/rpm.rst deleted file mode 100644 index 9e8cdcd003c44..0000000000000 --- a/doc/install/rpm.rst +++ /dev/null @@ -1,193 +0,0 @@ -======================== - Installing RPM Packages -======================== - -You may install stable release packages (for stable deployments), -development release packages (for the latest features), or development -testing packages (for development and QA only). Do not add multiple -package sources at the same time. - - -Install Release Key -=================== - -Packages are cryptographically signed with the ``release.asc`` key. Add our -release key to your system's list of trusted keys to avoid a security warning:: - - sudo rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' - - -Install Prerequisites -===================== - -Ceph may require additional additional third party libraries. -To add the EPEL repository, execute the following:: - - su -c 'rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm' - -Some releases of Ceph require the following packages: - -- snappy -- leveldb -- gdisk -- python-argparse -- gperftools-libs - -To install these packages, execute the following:: - - sudo yum install snappy leveldb gdisk python-argparse gperftools-libs - - -Add Release Packages -==================== - - -Dumpling --------- - -Dumpling is the most recent stable release of Ceph. These packages are -recommended for anyone deploying Ceph in a production environment. -Critical bug fixes are backported and point releases are made as necessary. - -Packages are currently built for the RHEL/CentOS6 (``el6``), Fedora 18 and 19 -(``f18`` and ``f19``), OpenSUSE 12.2 (``opensuse12.2``), and SLES (``sles11``) -platforms. The repository package installs the repository details on your local -system for use with ``yum`` or ``up2date``. 
- -For example, for CentOS 6 or other RHEL6 derivatives (``el6``):: - - su -c 'rpm -Uvh http://ceph.com/rpm-dumpling/el6/noarch/ceph-release-1-0.el6.noarch.rpm' - -You can download the RPMs directly from:: - - http://ceph.com/rpm-dumpling - - -Cuttlefish ----------- - -Cuttlefish is the previous recent major release of Ceph. These packages are -recommended for those who have already deployed bobtail in production and are -not yet ready to upgrade. - -Packages are currently built for the RHEL/CentOS6 (``el6``), Fedora 17 -(``f17``), OpenSUSE 12 (``opensuse12``), and SLES (``sles11``) -platforms. The repository package installs the repository details on -your local system for use with ``yum`` or ``up2date``. - -For example, for CentOS 6 or other RHEL6 derivatives (``el6``):: - - su -c 'rpm -Uvh http://ceph.com/rpm-cuttlefish/el6/noarch/ceph-release-1-0.el6.noarch.rpm' - -You can download the RPMs directly from:: - - http://ceph.com/rpm-cuttlefish - - -Bobtail -------- - -Bobtail is the second major release of Ceph. These packages are -recommended for those who have already deployed bobtail in production and -are not yet ready to upgrade. - -Packages are currently built for the RHEL/CentOS6 (``el6``), Fedora 17 -(``f17``), OpenSUSE 12 (``opensuse12``), and SLES (``sles11``) -platforms. The repository package installs the repository details on -your local system for use with ``yum`` or ``up2date``. - -Replace the``{DISTRO}`` below with the distro codename:: - - su -c 'rpm -Uvh http://ceph.com/rpm-bobtail/{DISTRO}/x86_64/ceph-release-1-0.el6.noarch.rpm' - -For example, for CentOS 6 or other RHEL6 derivatives (``el6``):: - - su -c 'rpm -Uvh http://ceph.com/rpm-bobtail/el6/x86_64/ceph-release-1-0.el6.noarch.rpm' - -You can download the RPMs directly from:: - - http://ceph.com/rpm-bobtail - - -Development Release Packages ----------------------------- - -Our development process generates a new release of Ceph every 3-4 weeks. 
These -packages are faster-moving than the stable releases. Development packages have -new features integrated quickly, while still undergoing several weeks of QA -prior to release. - -Packages are cryptographically signed with the ``release.asc`` key. Add our -release key to your system's list of trusted keys to avoid a security warning:: - - sudo rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc' - -Packages are currently built for the CentOS-6 and Fedora 17 platforms. The -repository package installs the repository details on your local system for use -with ``yum`` or ``up2date``. - -For CentOS-6:: - - su -c 'rpm -Uvh http://ceph.com/rpms/el6/x86_64/ceph-release-1-0.el6.noarch.rpm' - -For Fedora 17:: - - su -c 'rpm -Uvh http://ceph.com/rpms/fc17/x86_64/ceph-release-1-0.fc17.noarch.rpm' - -You can download the RPMs directly from:: - - http://ceph.com/rpm-testing - - - -Adding Ceph to YUM -================== - -You may also add Ceph to the ``/etc/yum.repos.d`` directory. Create a -``ceph.repo`` file. In the example below, replace ``{ceph-stable}`` with -a stable release of Ceph (e.g., ``cuttlefish``, ``dumpling``, etc.) and -``{distro}`` with your Linux distribution (e.g., ``el6``, ``rhel6``, etc.). 
:: - - [ceph] - name=Ceph packages for $basearch - baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/$basearch - enabled=1 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - - [ceph-noarch] - name=Ceph noarch packages - baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/noarch - enabled=1 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - - [ceph-source] - name=Ceph source packages - baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/SRPMS - enabled=0 - gpgcheck=1 - type=rpm-md - gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - - -Installing Ceph Deploy -====================== - -Once you have added either release or development packages, or added a -``ceph.repo`` file to ``/etc/yum.repos.d``, you can install ``ceph-deploy``. :: - - sudo yum install ceph-deploy python-pushy - - -Installing Ceph Packages -======================== - -Once you have added either release or development packages, or added a -``ceph.repo`` file to ``/etc/yum.repos.d``, you can install Ceph packages. :: - - sudo yum install ceph - -.. note:: You can also use ``ceph-deploy`` to install Ceph packages. diff --git a/doc/install/yum-priorities.rst b/doc/install/yum-priorities.rst deleted file mode 100644 index e4adb72b7dd5c..0000000000000 --- a/doc/install/yum-priorities.rst +++ /dev/null @@ -1,20 +0,0 @@ -=========================== - Installing YUM Priorities -=========================== - -Ceph builds packages for Apache and FastCGI (for 100-continue support) and -QEMU (for ``rbd`` support). You must set priorities in your ``.repo`` -files to ensure that ``yum`` installs the Ceph packages instead of the -standard packages. The ``priorities`` setting requires you to install -and enable ``yum-plugin-priorities``. - -#. Install ``yum-plugin-priorities``. :: - - sudo yum install yum-plugin-priorities - -#. Ensure ``/etc/yum/pluginconf.d/priorities.conf`` exists. 
:: - -#. Ensure ``priorities.conf`` enables the plugin. :: - - [main] - enabled = 1 diff --git a/doc/rados/troubleshooting/cpu-profiling.rst b/doc/rados/troubleshooting/cpu-profiling.rst index e46c5cab5942e..159f7998dcb99 100644 --- a/doc/rados/troubleshooting/cpu-profiling.rst +++ b/doc/rados/troubleshooting/cpu-profiling.rst @@ -64,4 +64,4 @@ To reset ``oprofile``, execute the following command:: you do not commingle results from different tests. .. _oprofile: http://oprofile.sourceforge.net/about/ -.. _Installing Oprofile: ../../../install/cpu-profiler +.. _Installing Oprofile: ../../../dev/cpu-profiler diff --git a/doc/radosgw/config.rst b/doc/radosgw/config.rst index caa3dac15e145..5cf437dc72d18 100644 --- a/doc/radosgw/config.rst +++ b/doc/radosgw/config.rst @@ -399,7 +399,7 @@ according to the ``rgw keystone accepted roles`` configurable. Keystone itself needs to be configured to point to the Ceph Object Gateway as an object-storage endpoint:: - keystone service-create --name swift --type-object store + keystone service-create --name swift --type object-store keystone endpoint-create --service-id --publicurl http://radosgw.example.com/swift/v1 \ --internalurl http://radosgw.example.com/swift/v1 --adminurl http://radosgw.example.com/swift/v1 diff --git a/doc/radosgw/federated-config.rst b/doc/radosgw/federated-config.rst new file mode 100644 index 0000000000000..67c5c89d2d3c0 --- /dev/null +++ b/doc/radosgw/federated-config.rst @@ -0,0 +1,735 @@ +================================ + Configuring Federated Gateways +================================ + +.. versionadded:: 0.72 Emperor + +In Ceph version 0.72 Emperor and beyond, you may configure each :term:`Ceph +Object Gateway` to participate in a federated architecture, with multiple +regions, and with multiple zones for a region. + +- **Region**: A region represents a *logical* geographic area and contains one + or more zones. A cluster with multiple regions must specify a master region. 
+ +- **Zone**: A zone is a *logical* grouping of one or more Ceph Object Gateway + instance(s). A region has a master zone that processes client requests. + +.. important:: Only write objects to the master zone in a region. You may read + objects from secondary zones. Currently, the Gateway does not prevent you + from writing to a secondary zone, but DON'T DO IT. + +Background +========== + +When you deploy a :term:`Ceph Object Store` service that spans geographical +locales, configuring Ceph Object Gateway regions and metadata synchronization +agents enables the service to maintain a global namespace, even though Ceph +Object Gateway instances run in different geographic locales and potentially on +different Ceph Storage Clusters. When you separate one or more Ceph Object +Gateway instances within a region into separate logical containers to maintain +an extra copy (or copies) of the data, configuring Ceph Object Gateway zones and +data synchronization agents enables the service to maintain one or more +copy(ies) of the master zone's data. Extra copies of the data are important for +failover, backup and disaster recovery. + +You may deploy a single Ceph Storage Cluster with a federated architecture if +you have low latency network connections (this isn't recommended). You may also +deploy one Ceph Storage Cluster per region with a separate set of pools for +each zone (typical). You may also deploy a separate Ceph Storage Cluster for +each zone if your requirements and resources warrant this level of redundancy. + +About this Guide +================ + +In the following sections, we will demonstrate how to configure a federated +cluster in two logical steps: + +- **Configure a Master Region:** This section of the guide describes how to + set up a region with multiple zones, and how to synchronize data between the + master zone and the secondary zone(s) within the master region. 
+ +- **Configure a Secondary Region:** This section of the guide describes how + to repeat the section on setting up a master region and multiple zones so + that you have two regions with intra-zone synchronization in each region. + Finally, you will learn how to set up a metadata synchronization agent so + that you can maintain a global namespace for the regions in your cluster. + + + +Configure a Master Region +========================= + +This section provides an exemplary procedure for setting up a region, and two +zones within the region. The cluster will comprise two gateway daemon +instances--one per zone. This region will serve as the master region. + + +Naming for the Master Region +---------------------------- + +Before configuring the cluster, defining region, zone and instance names will +help you manage your cluster. Let's assume the region represents the United +States, and we refer to it by its standard abbreviation. + +- United States: ``us`` + +Let's assume the zones represent the Eastern and Western United States. For +continuity, our naming convention will use ``{region name}-{zone name}`` format, +but you can use any naming convention you prefer. + +- United States, East Region: ``us-east`` +- United States, West Region: ``us-west`` + +Finally, let's assume that zones may have more than one Ceph Object Gateway +instance per zone. For continuity, our naming convention will use +``{region name}-{zone name}-{instance}`` format, but you can use any naming +convention you prefer. + + +- United States Region, Master Zone, Instance 1: ``us-east-1`` +- United States Region, Secondary Zone, Instance 1: ``us-west-1`` + + +Create Pools +------------ + +You may have a Ceph Storage Cluster for the entire region or a Ceph Storage +Cluster for each zone. + +For continuity, our naming convention will use ``{region name}-{zone name}`` +format prepended to the pool name, but you can use any naming convention you +prefer. 
For example: + + +- ``.us.rgw.root`` + +- ``.us-east.rgw.root`` +- ``.us-east.rgw.control`` +- ``.us-east.rgw.gc`` +- ``.us-east.log`` +- ``.us-east.intent-log`` +- ``.us-east.usage`` +- ``.us-east.users`` +- ``.us-east.users.email`` +- ``.us-east.users.swift`` +- ``.us-east.users.uid`` + +- ``.us-west.rgw.root`` +- ``.us-west.rgw.control`` +- ``.us-west.rgw.gc`` +- ``.us-west.log`` +- ``.us-west.intent-log`` +- ``.us-west.usage`` +- ``.us-west.users`` +- ``.us-west.users.email`` +- ``.us-west.users.swift`` +- ``.us-west.users.uid`` + +See `Configuration Reference - Pools`_ for details on the default pools for +gateways. See `Pools`_ for details on creating pools. Execute the following +to create a pool:: + + ceph osd pool create {poolname} {pg-num} {pgp-num} + + +.. tip:: When adding a large number of pools, it may take some time for your + cluster to return to a ``active + clean`` state. + +.. topic:: CRUSH Maps + + When deploying a Ceph Storage Cluster for the entire region, consider + using a CRUSH rule for the the zone such that you do NOT have overlapping + failure domains. See `CRUSH Map`_ for details. + +When you have completed this step, execute the following to ensure that +you have created all of the foregoing pools:: + + rados lspools + + +Create a Keyring +---------------- + +Each instance must have a user name and key to communicate with a Ceph Storage +Cluster. In the following steps, we use an admin node to create a keyring. +Then, we create a client user name and key for each instance. Next, we add the +keys to the Ceph Storage Cluster(s). Finally, we distribute the key ring to +each node containing an instance. + +#. Create a keyring. :: + + sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring + sudo chmod +r /etc/ceph/ceph.client.radosgw.keyring + + +#. Generate a Ceph Object Gateway user name and key for each instance. 
:: + + sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-east-1 --gen-key + sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.us-west-1 --gen-key + + +#. Add capabilities to each key. See `Configuration Reference - Pools`_ for details + on the effect of write permissions for the monitor and creating pools. :: + + sudo ceph-authtool -n client.radosgw.us-east-1 --cap osd 'allow rwx' --cap mon 'allow rw' /etc/ceph/ceph.client.radosgw.keyring + sudo ceph-authtool -n client.radosgw.us-west-1 --cap osd 'allow rwx' --cap mon 'allow rw' /etc/ceph/ceph.client.radosgw.keyring + + +#. Once you have created a keyring and key to enable the Ceph Object Gateway + with access to the Ceph Storage Cluster, add each key as an entry to your + Ceph Storage Cluster(s). For example:: + + sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-east-1 -i /etc/ceph/ceph.client.radosgw.keyring + sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.us-west-1 -i /etc/ceph/ceph.client.radosgw.keyring + + + +Install Apache/FastCGI +---------------------- + +For each :term:`Ceph Node` that runs a :term:`Ceph Object Gateway` daemon +instance, you must install Apache, FastCGI, the Ceph Object Gateway daemon +(``radosgw``) and the Ceph Object Gateway Sync Agent (``radosgw-agent``). +See `Install Apache, FastCGI and Gateway`_ for details. + + +Create Data Directories +----------------------- + +Create data directories for each daemon instance on their respective +hosts. :: + + ssh {us-east-1} + sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.us-east-1 + + ssh {us-west-1} + sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.us-west-1 + + +Create a Gateway Configuration +------------------------------ + +For each instance, create an Ceph Object Gateway configuration file under the +``/etc/apache2/sites-available`` directory on the host(s) where you installed +the Ceph Object Gateway daemon(s). 
See below for an exemplary embodiment of a +gateway configuration as discussed in the following text. + +.. literalinclude:: rgw.conf + :language: ini + +#. Replace the ``/{path}/{socket-name}`` entry with path to the socket and + the socket name. For example, + ``/var/run/ceph/client.radosgw.us-east-1.sock``. Ensure that you use the + same path and socket name in your ``ceph.conf`` entry. + +#. Replace the ``{fqdn}`` entry with the fully-qualified domain name of the + server. + +#. Replace the ``{email.address}`` entry with the email address for the + server administrator. + +#. Add a ``ServerAlias`` if you wish to use S3-style subdomains + (of course you do). + +#. Save the configuration to a file (e.g., ``rgw-us-east.conf``). + +Repeat the process for the secondary zone (e.g., ``rgw-us-west.conf``). + + +Enable the Configuration +------------------------ + +For each instance, enable the gateway configuration and disable the +default site. + +#. Enable the site for the gateway configuration. :: + + sudo a2ensite {rgw-conf-filename} + +#. Disable the default site. :: + + sudo a2dissite default + +.. note:: Failure to disable the default site can lead to problems. + + +Add a FastCGI Script +-------------------- + +FastCGI requires a script for each Ceph Object Gateway instance to +enable the S3-compatible interface. To create the script, execute +the following procedures. + + +#. Go to the ``/var/www`` directory. :: + + cd /var/www + +#. Open an editor with the file name ``s3gw.fcgi``. **Note:** The configuration + file specifies this filename. :: + + sudo vim s3gw.fcgi + +#. Add a shell script with ``exec`` and the path to the gateway binary, + the path to the Ceph configuration file, and the user name (``-n``; + the same user name created in step 2 of `Create a Keyring`_. + Copy the following into the editor. 
:: + + #!/bin/sh + exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.{ID} + + For example:: + + #!/bin/sh + exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.us-east-1 + +#. Save the file. + +#. Change the permissions on the file so that it is executable. :: + + sudo chmod +x s3gw.fcgi + + +Repeat the process for the secondary zone. + + +Add Instances to Ceph Config File +--------------------------------- + +On an admin node, add an entry for each instance in the Ceph configuration file +for your Ceph Storage Cluster(s). For example:: + + ... + + [client.radosgw.us-east-1] + rgw region = us + rgw region root pool = .us.rgw.root + rgw zone = us-east + rgw zone root pool = .us-east.rgw.root + keyring = /etc/ceph/ceph.client.radosgw.keyring + rgw dns name = {hostname} + rgw socket path = /var/run/ceph/$name.sock + host = {host-name} + + [client.radosgw.us-west-1] + rgw region = us + rgw region root pool = .us.rgw.root + rgw zone = ny-queens + rgw zone root pool = .us-west.rgw.root + keyring = /etc/ceph/ceph.client.radosgw.keyring + rgw dns name = {hostname} + rgw socket path = /var/run/ceph/$name.sock + host = {host-name} + + +Then, update each :term:`Ceph Node` with the updated Ceph configuration +file. For example:: + + ceph-deploy --overwrite-conf config {node1} {node2} {nodex} + + +Create a Region +--------------- + +#. Configure a region infile called ``us.json`` for the ``us`` region. + + Copy the contents of the following example to a text editor. Set + ``is_master`` to ``true``. Replace ``{fqdn}`` with the fully-qualified + domain name of the endpoint. It will specify a master zone as ``us-east`` + and list it in the ``zones`` list along with the ``us-west`` zone. 
+ See `Configuration Reference - Regions`_ for details.:: + + { "name": "us", + "api_name": "us", + "is_master": "true", + "endpoints": [ + "http:\/\/{fqdn}:80\/"], + "master_zone": "us-east", + "zones": [ + { "name": "us-east", + "endpoints": [ + "http:\/\/{fqdn}:80\/"], + "log_meta": "false", + "log_data": "false"}, + { "name": "us-west", + "endpoints": [ + "http:\/\/{fqdn}:80\/"], + "log_meta": "false", + "log_data": "false"}], + "placement_targets": [], + "default_placement": ""} + + +#. Create the ``us`` region using the ``us.json`` infile you just + created. :: + + radosgw-admin region set --infile us.json --name client.radosgw.us-east-1 + +#. Delete the default region (if it exists). :: + + rados -p .us.rgw.root rm region_info.default + +#. Set the ``us`` region as the default region. :: + + radosgw-admin region default --rgw-region=us --name client.radosgw.us-east-1 + + Only one region can be the default region for a cluster. + +#. Update the region map. :: + + radosgw-admin regionmap update --name client.radosgw.us-east-1 + + + +Create Zones +------------ + +#. Configure a zone infile called ``us-east.json`` for the + ``us-east`` zone. + + Copy the contents of the following example to a text editor. + This configuration uses pool names prepended with the region name and + zone name. See `Configuration Reference - Pools`_ for additional details on + gateway pools. See `Configuration Reference - Zones`_ for additional + details on zones. :: + + { "domain_root": ".us-east.rgw.root", + "control_pool": ".us-east.rgw.control", + "gc_pool": ".us-east.rgw.gc", + "log_pool": ".us-east.log", + "intent_log_pool": ".us-east.intent-log", + "usage_log_pool": ".us-east.usage", + "user_keys_pool": ".us-east.users", + "user_email_pool": ".us-east.users.email", + "user_swift_pool": ".us-east.users.swift", + "user_uid_pool": ".us-east.users.uid", + "system_key": { "access_key": "", "secret_key": ""} + } + + +#. 
Add the ``us-east`` zone using the ``us-east.json`` infile you + just created in both the east and west pools by specifying their respective + user names (i.e., ``--name``). :: + + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-west-1 + + Repeat step 1 to create a zone infile for ``us-west``. Then add the zone + using the ``us-west.json`` infile in both the east and west pools by + specifying their respective user names (i.e., ``--name``). :: + + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-west-1 + + +#. Delete the default zone (if it exists). :: + + rados -p .rgw.root rm zone_info.default + + +#. Update the region map. :: + + radosgw-admin regionmap update --name client.radosgw.us-east-1 + + + +Create Zone Users +----------------- + +Ceph Object Gateway stores zone users in the zone pools. So you must create zone +users after configuring the zones. Copy the ``access_key`` and ``secret_key`` +fields for each user so you can update your zone configuration once you complete +this step. :: + + radosgw-admin user create --uid="us-east" --display-name="Region-US Zone-East" --name client.radosgw.us-east-1 --system + radosgw-admin user create --uid="us-west" --display-name="Region-US Zone-West" --name client.radosgw.us-west-1 --system + + +Update Zone Configurations +-------------------------- + +You must update the zone configuration with zone users so that +the synchronization agents can authenticate with the zones. + +#. Open your ``us-east.json`` zone configuration file and paste the contents of + the ``access_key`` and ``secret_key`` fields from the step of creating + zone users into the ``system_key`` field of your zone configuration + infile. 
:: + + { "domain_root": ".us-east.rgw", + "control_pool": ".us-east.rgw.control", + "gc_pool": ".us-east.rgw.gc", + "log_pool": ".us-east.log", + "intent_log_pool": ".us-east.intent-log", + "usage_log_pool": ".us-east.usage", + "user_keys_pool": ".us-east.users", + "user_email_pool": ".us-east.users.email", + "user_swift_pool": ".us-east.users.swift", + "user_uid_pool": ".us-east.users.uid", + "system_key": { + "access_key": "{paste-access_key-here}", + "secret_key": "{paste-secret_key-here}" + }, + "placement_pools": [] + } + +#. Save the ``us-east.json`` file. Then, update your zone configuration. :: + + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.us-west-1 + +#. Repeat step 1 to update the zone infile for ``us-west``. Then, update + your zone configuration. :: + + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.us-west-1 + + +Restart Services +---------------- + +Once you have redeployed your Ceph configuration files, we recommend restarting +your Ceph Storage Cluster(s) and Apache instances. + +For Ubuntu, use the following on each :term:`Ceph Node`:: + + sudo restart ceph-all + +For Red Hat/CentOS, use the following:: + + sudo /etc/init.d/ceph restart + +To ensure that all components have reloaded their configurations, for each +gateway instance we recommend restarting the ``apache2`` service. For example:: + + sudo service apache2 restart + + +Start Gateway Instances +----------------------- + +Start up the ``radosgw`` service. :: + + sudo /etc/init.d/radosgw start + +If you are running multiple instances on the same host, you must specify the +user name. :: + + sudo /etc/init.d/radosgw start --name client.radosgw.us-east-1 + + +Open a browser and check the endpoints for each zone. 
A simple HTTP request +to the domain name should return the following: + +.. code-block:: xml + + + + anonymous + + + + + + +Configure a Secondary Region +============================ + +This section provides an exemplary procedure for setting up a cluster with +multiple regions. Configuring a cluster that spans regions requires maintaining +a global namespace, so that there are no namespace clashes among object names +stored across in different regions. + +This section extends the procedure in `Configure a Master Region`_, but +changes the region name and modifies a few procedures. See the following +sections for details. + + +Naming for the Secondary Region +------------------------------- + +Before configuring the cluster, defining region, zone and instance names will +help you manage your cluster. Let's assume the region represents the European +Union, and we refer to it by its standard abbreviation. + +- European Union: ``eu`` + +Let's assume the zones represent the Eastern and Western European Union. For +continuity, our naming convention will use ``{region name}-{zone name}`` +format, but you can use any naming convention you prefer. + +- European Union, East Region: ``eu-east`` +- European Union, West Region: ``eu-west`` + +Finally, let's assume that zones may have more than one Ceph Object Gateway +instance per zone. For continuity, our naming convention will use +``{region name}-{zone name}-{instance}`` format, but you can use any naming +convention you prefer. + +- European Union Region, Master Zone, Instance 1: ``eu-east-1`` +- European Union Region, Secondary Zone, Instance 1: ``eu-west-1`` + + +Configuring a Secondary Region +------------------------------ + +Repeat the exemplary procedure of `Configure a Master Region`_ +with the following differences: + +#. Use `Naming for the Secondary Region`_ in lieu of `Naming for + the Master Region`_. + +#. `Create Pools`_ using ``eu`` instead of ``us``. + +#. 
`Create a Keyring`_ and the corresponding keys using ``eu`` instead of + ``us``. You may use the same keyring if you desire, but ensure that you + create the keys on the Ceph Storage Cluster for that region (or region + and zone). + +#. `Install Apache/FastCGI`_. + +#. `Create Data Directories`_ using ``eu`` instead of ``us``. + +#. `Create a Gateway Configuration`_ using ``eu`` instead of ``us`` for + the socket names. + +#. `Enable the Configuration`_. + +#. `Add a FastCGI Script`_ using ``eu`` instead of ``us`` for the user names. + +#. `Add Instances to Ceph Config File`_ using ``eu`` instead of ``us`` for the + pool names. + +#. `Create a Region`_ using ``eu`` instead of ``us``. Set ``is_master`` to + ``false``. For consistency, create the master region in the secondary region + too. :: + + radosgw-admin region set --infile us.json --name client.radosgw.eu-east-1 + +#. `Create Zones`_ using ``eu`` instead of ``us``. Ensure that you update the + user name (i.e., ``--name``) so that you create the zones in the correct + cluster. + +#. `Update Zone Configurations`_ using ``eu`` instead of ``us``. + +#. Create zones from master region in the secondary region. :: + + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.eu-east-1 + radosgw-admin zone set --rgw-zone=us-east --infile us-east.json --name client.radosgw.eu-west-1 + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.eu-east-1 + radosgw-admin zone set --rgw-zone=us-west --infile us-west.json --name client.radosgw.eu-west-1 + +#. Create zones from secondary region in the master region. 
:: + + radosgw-admin zone set --rgw-zone=eu-east --infile eu-east.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=eu-east --infile eu-east.json --name client.radosgw.us-west-1 + radosgw-admin zone set --rgw-zone=eu-west --infile eu-west.json --name client.radosgw.us-east-1 + radosgw-admin zone set --rgw-zone=eu-west --infile eu-west.json --name client.radosgw.us-west-1 + +#. `Restart Services`_. + +#. `Start Gateway Instances`_. + + +Multi-Site Data Replication +=========================== + +The data synchronization agent replicates the data of a master zone to a +secondary zone. The master zone of a region is the source for the secondary zone +of the region and it gets selected automatically. + +.. image:: ../images/zone-sync.png + +To configure the synchronization agent, retrieve the access key and secret for +the source and destination, and the destination URL and port. + +You may use ``radosgw-admin zone list`` to get a list of zone names. You +may use ``radosgw-admin zone get`` to identify the key and secret for the +zone. You may refer to the gateway configuration file you created under +`Create a Gateway Configuration`_ to identify the port number. + +You only need the hostname and port for a single instance (assuming all +gateway instances in a region/zone access the same Ceph Storage Cluster). +Specify these values in a configuration file +(e.g., ``cluster-data-sync.conf``), and include a ``log_file`` name. + + +For example: + +.. code-block:: ini + + src_access_key: {source-access-key} + src_secret_key: {source-secret-key} + destination: https://zone-name.fqdn.com:port + dest_access_key: {destination-access-key} + dest_secret_key: {destination-secret-key} + log_file: {log.filename} + +A concrete example may look like this: + +.. 
code-block:: ini + + src_access_key: DG8RE354EFPZBICHIAF0 + src_secret_key: i3U0HiRP8CXaBWrcF8bbh6CbsxGYuPPwRkixfFSb + destination: https://us-west.storage.net:80 + dest_access_key: U60RFI6B08F32T2PD30G + dest_secret_key: W3HuUor7Gl1Ee93pA2pq2wFk1JMQ7hTrSDecYExl + log_file: /var/log/radosgw/radosgw-sync-us-east-west.log + +To activate the data synchronization agent, open a terminal and +execute the following:: + + radosgw-agent -c cluster-data-sync.conf + +When the synchronization agent is running, you should see output +indicating that the agent is synchronizing shards of data. :: + + INFO:radosgw_agent.sync:Starting incremental sync + INFO:radosgw_agent.worker:17910 is processing shard number 0 + INFO:radosgw_agent.worker:shard 0 has 0 entries after '' + INFO:radosgw_agent.worker:finished processing shard 0 + INFO:radosgw_agent.worker:17910 is processing shard number 1 + INFO:radosgw_agent.sync:1/64 shards processed + INFO:radosgw_agent.worker:shard 1 has 0 entries after '' + INFO:radosgw_agent.worker:finished processing shard 1 + INFO:radosgw_agent.sync:2/64 shards processed + ... + +.. note:: You must have an agent for each source-destination pair. + + +Inter-Region Metadata Replication +================================= + +The data synchronization agent replicates the metadata of the master zone in the +master region to a master zone in a secondary region. Metadata consists of +gateway users and buckets, but not the objects within the buckets--ensuring a +unified namespace across the cluster. The master zone of the master region is +the source for the master zone of the secondary region and it gets selected +automatically. + +.. image:: ../images/region-zone-sync.png + :align: center + +Follow the same steps in `Multi-Site Data Replication`_ by specifying the master
zone of the master region as the source zone and the master zone of the
secondary region as the secondary zone. 
When activating the ``radosgw-agent``, +specify ``--metadata-only`` so that it only copies metadata. For example:: + + radosgw-agent -c inter-region-data-sync.conf --metadata-only + +Once you have completed the foregoing procedure, you should have a cluster +consisting of a master region (``us``) and a secondary region (``eu``) where +there is a unified namespace between the two regions. + + + +.. _CRUSH Map: ../../rados/operations/crush-map +.. _Install Apache, FastCGI and Gateway: ../manual-install +.. _Cephx Administration: ../../rados/operations/authentication/#cephx-administration +.. _Ceph configuration file: ../../rados/configuration/ceph-conf +.. _Configuration Reference - Pools: ../config-ref#pools +.. _Configuration Reference - Regions: ../config-ref#regions +.. _Configuration Reference - Zones: ../config-ref#zones +.. _Pools: ../../rados/operations/pools +.. _Simple Configuration: ../config \ No newline at end of file diff --git a/doc/radosgw/index.rst b/doc/radosgw/index.rst index 9f6274c232609..d9be66b3d8f69 100644 --- a/doc/radosgw/index.rst +++ b/doc/radosgw/index.rst @@ -36,8 +36,9 @@ you may write data with one API and retrieve it with the other. .. toctree:: :maxdepth: 1 - Manual Install - Configuration + Manual Install <../../install/install-ceph-gateway> + Simple Configuration + Federated Configuration Config Reference Purging Temp Data S3 API diff --git a/doc/radosgw/manual-install.rst b/doc/radosgw/manual-install.rst deleted file mode 100644 index 6b9b7e59d1f35..0000000000000 --- a/doc/radosgw/manual-install.rst +++ /dev/null @@ -1,132 +0,0 @@ -===================================== - Install Apache, FastCGI and Gateway -===================================== - -Install Packages -================ - -To install Ceph Object Gateway, you must install Apache and FastCGI first. 
:: - - sudo apt-get update && sudo apt-get install apache2 libapache2-mod-fastcgi - - -100-Continue Support --------------------- - -The Ceph community provides a slightly optimized version of the ``apache2`` -and ``fastcgi`` packages. The material difference is that the Ceph packages are -optimized for the ``100-continue`` HTTP response, where the server determines -if it will accept the request by first evaluating the request header. See `RFC -2616, Section 8`_ for details on ``100-continue``. You can find the Apache and -FastCGI packages modified for Ceph here: - -- `Apache Oneiric`_ -- `Apache Precise`_ -- `Apache Quantal for ARM (Calxeda)`_ -- `FastCGI Oneric`_ -- `FastCGI Precise`_ -- `FastCGI Quantal for ARM (Calxeda)`_ - -You may also clone Ceph's Apache and FastCGI git repositories:: - - git clone --recursive https://github.com/ceph/mod_fastcgi.git - git clone --recursive https://github.com/ceph/apache2.git - -.. _Apache Oneiric: http://gitbuilder.ceph.com/apache2-deb-oneiric-x86_64-basic/ -.. _Apache Precise: http://gitbuilder.ceph.com/apache2-deb-precise-x86_64-basic/ -.. _Apache Quantal for ARM (Calxeda): http://gitbuilder.ceph.com/apache2-deb-quantal-arm7l-basic/ -.. _FastCGI Oneric: http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-oneiric-x86_64-basic/ -.. _FastCGI Precise: http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-precise-x86_64-basic/ -.. _FastCGI Quantal for ARM (Calxeda): http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-quantal-arm7l-basic/ -.. _RFC 2616, Section 8: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html - -.. important:: If you do NOT use a modified fastcgi as described above, - you should disable 100-Continue support by adding the following to - your ``ceph.conf``:: - - rgw print continue = false - - -Apache Configuration -==================== - -Enable the URL rewrite modules for Apache and FastCGI. 
For example:: - - sudo a2enmod rewrite - sudo a2enmod fastcgi - -By default, the ``/etc/apache2/httpd.conf`` or ``/etc/apache2/apache2.conf`` -file is blank. Add a line for the ``ServerName`` and provide the fully -qualified domain name of the host where you will install the Ceph Object -Gateway. For example:: - - ServerName {fqdn} - -Restart Apache so that the foregoing changes take effect. :: - - sudo service apache2 restart - -Then, install Ceph Object Gateway and its sync agent. For example:: - - sudo apt-get install radosgw radosgw-agent - - -Enable SSL ----------- - -Some REST clients use HTTPS by default. So you should consider enabling SSL -for Apache on the server machine. :: - - sudo a2enmod ssl - -Once you enable SSL, you should generate an SSL certificate. :: - - sudo mkdir /etc/apache2/ssl - sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/apache2/ssl/apache.key -out /etc/apache2/ssl/apache.crt - - -.. note:: The foregoing example uses self-certified certificates. Some client - APIs check for a trusted certificate authority. So you may need to obtain - a SSL certificate from a trusted authority to use those client APIs. - -Then, restart Apache. :: - - service apache2 restart - - -Add Wildcard to DNS -=================== - -To use Ceph with S3-style subdomains (e.g., ``bucket-name.domain-name.com``), -you need to add a wildcard to the DNS record of the DNS server you use with the -``radosgw`` daemon. - -.. tip:: The address of the DNS must also be specified in the Ceph - configuration file with the ``rgw dns name = {hostname}`` setting. - -For ``dnsmasq``, consider addding the following ``address`` setting with a dot -(.) prepended to the host name:: - - address=/.{hostname-or-fqdn}/{host-ip-address} - address=/.ceph-node/192.168.0.1 - -For ``bind``, consider adding the a wildcard to the DNS record:: - - $TTL 604800 - @ IN SOA ceph-node. root.ceph-node. 
( - 2 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 604800 ) ; Negative Cache TTL - ; - @ IN NS ceph-node. - @ IN A 192.168.122.113 - * IN CNAME @ - -Restart your DNS server and ping your server with a subdomain to -ensure that your Ceph Object Store ``radosgw`` daemon can process -the subdomain requests. :: - - ping mybucket.{fqdn} - ping mybucket.ceph-node diff --git a/doc/radosgw/rgw.conf b/doc/radosgw/rgw.conf new file mode 100644 index 0000000000000..7cc73070651d5 --- /dev/null +++ b/doc/radosgw/rgw.conf @@ -0,0 +1,30 @@ +FastCgiExternalServer /var/www/s3gw.fcgi -socket /{path}/{socket-name}.sock + + + + + ServerName {fqdn} + + + ServerAdmin {email.address} + DocumentRoot /var/www + RewriteEngine On + RewriteRule ^/(.*) /s3gw.fcgi?%{QUERY_STRING}[E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] + + + + Options +ExecCGI + AllowOverride All + SetHandler fastcgi-script + Order allow,deny + Allow from all + AuthBasicAuthoritative Off + + + + AllowEncodedSlashes On + ErrorLog /var/log/apache2/error.log + CustomLog /var/log/apache2/access.log combined + ServerSignature Off + + \ No newline at end of file diff --git a/doc/radosgw/troubleshooting.rst b/doc/radosgw/troubleshooting.rst index 9c63742cb9716..918d0a30a5fac 100644 --- a/doc/radosgw/troubleshooting.rst +++ b/doc/radosgw/troubleshooting.rst @@ -17,6 +17,14 @@ the startup script is trying to start the process as a ``www-data`` or ``apache`` user and an existing ``.asok`` is preventing the script from starting the daemon. 
+The radosgw init script (/etc/init.d/radosgw) also has a verbose argument that
+can provide some insight as to what could be the issue::
+
+    /etc/init.d/radosgw start -v
+
+or::
+
+    /etc/init.d/radosgw start --verbose
 HTTP Request Errors
 =================== diff --git a/doc/release-notes.rst b/doc/release-notes.rst index b8264146c88a6..6738e2f21e241 100644 --- a/doc/release-notes.rst +++ b/doc/release-notes.rst @@ -407,6 +407,12 @@ v0.69 Upgrading ~~~~~~~~~ +* The sysvinit /etc/init.d/ceph script will, by default, update the + CRUSH location of an OSD when it starts. Previously, if the + monitors were not available, this command would hang indefinitely. + Now, that step will time out after 10 seconds and the ceph-osd daemon + will not be started. + * Users of the librados C++ API should replace users of get_version() with get_version64() as the old method only returns a 32-bit value for a 64-bit field. The existing 32-bit get_version() method is now diff --git a/doc/start/hardware-recommendations.rst b/doc/start/hardware-recommendations.rst index 90d29e5e7e2c0..c589301a435c2 100644 --- a/doc/start/hardware-recommendations.rst +++ b/doc/start/hardware-recommendations.rst @@ -10,11 +10,7 @@ issues. Hardware planning should include distributing Ceph daemons and other processes that use Ceph across many hosts. Generally, we recommend running Ceph daemons of a specific type on a host configured for that type of daemon. We recommend using other hosts for processes that utilize your -data cluster (e.g., OpenStack, CloudStack, etc). - -`Inktank`_ provides excellent premium support for hardware planning. - -.. _Inktank: http://www.inktank.com +data cluster (e.g., OpenStack, CloudStack, etc). .. tip:: Check out the Ceph blog too. Articles like `Ceph Write Throughput 1`_, @@ -45,9 +41,9 @@ RAM Metadata servers and monitors must be capable of serving their data quickly, so they should have plenty of RAM (e.g., 1GB of RAM per daemon instance). 
OSDs do -not require as much RAM for regular operations (e.g., 200MB of RAM per daemon -instance); however, during recovery they need significantly more RAM (e.g., -500MB-1GB). Generally, more RAM is better. +not require as much RAM for regular operations (e.g., 500MB of RAM per daemon +instance); however, during recovery they need significantly more RAM (e.g., ~1GB +per 1TB of storage per daemon). Generally, more RAM is better. Data Storage @@ -62,7 +58,7 @@ for production, but it has the ability to journal and write data simultaneously, whereas XFS and ext4 do not. .. important:: Since Ceph has to write all data to the journal before it can - send an ACK (for XFS and EXT4 at least), having the journals and OSD + send an ACK (for XFS and EXT4 at least), having the journal and OSD performance in balance is really important! @@ -78,7 +74,10 @@ example, a 1 terabyte hard disk priced at $75.00 has a cost of $0.07 per gigabyte (i.e., $75 / 1024 = 0.0732). By contrast, a 3 terabyte hard disk priced at $150.00 has a cost of $0.05 per gigabyte (i.e., $150 / 3072 = 0.0488). In the foregoing example, using the 1 terabyte disks would generally increase the cost -per gigabyte by 40%--rendering your cluster substantially less cost efficient. +per gigabyte by 40%--rendering your cluster substantially less cost efficient. +Also, the larger the storage drive capacity, the more memory per Ceph OSD Daemon +you will need, especially during rebalancing, backfilling and recovery. A +general rule of thumb is ~1GB of RAM for 1TB of storage space. .. tip:: Running multiple OSDs on a single disk--irrespective of partitions--is **NOT** a good idea. @@ -90,18 +89,18 @@ Storage drives are subject to limitations on seek time, access time, read and write times, as well as total throughput. These physical limitations affect overall system performance--especially during recovery. 
We recommend using a dedicated drive for the operating system and software, and one drive for each -OSD daemon you run on the host. Most "slow OSD" issues arise due to running an -operating system, multiple OSDs, and/or multiple journals on the same drive. +Ceph OSD Daemon you run on the host. Most "slow OSD" issues arise due to running +an operating system, multiple OSDs, and/or multiple journals on the same drive. Since the cost of troubleshooting performance issues on a small cluster likely exceeds the cost of the extra disk drives, you can accelerate your cluster design planning by avoiding the temptation to overtax the OSD storage drives. -You may run multiple OSDs per hard disk drive, but this will likely lead to -resource contention and diminish the overall throughput. You may store a journal -and object data on the same drive, but this may increase the time it takes to -journal a write and ACK to the client. Ceph must write to the journal before it -can ACK the write. The btrfs filesystem can write journal data and object data -simultaneously, whereas XFS and ext4 cannot. +You may run multiple Ceph OSD Daemons per hard disk drive, but this will likely +lead to resource contention and diminish the overall throughput. You may store a +journal and object data on the same drive, but this may increase the time it +takes to journal a write and ACK to the client. Ceph must write to the journal +before it can ACK the write. The btrfs filesystem can write journal data and +object data simultaneously, whereas XFS and ext4 cannot. Ceph best practices dictate that you should run operating systems, OSD data and OSD journals on separate drives. @@ -129,10 +128,10 @@ sequential write throughput when storing multiple journals for multiple OSDs. SSD in a test configuration to gauge performance. Since SSDs have no moving mechanical parts, it makes sense to use them in the -areas of Ceph that do not use a lot of storage space. 
Relatively inexpensive -SSDs may appeal to your sense of economy. Use caution. Acceptable IOPS are not -enough when selecting an SSD for use with Ceph. There are a few important -performance considerations for journals and SSDs: +areas of Ceph that do not use a lot of storage space (e.g., journals). +Relatively inexpensive SSDs may appeal to your sense of economy. Use caution. +Acceptable IOPS are not enough when selecting an SSD for use with Ceph. There +are a few important performance considerations for journals and SSDs: - **Write-intensive semantics:** Journaling involves write-intensive semantics, so you should ensure that the SSD you choose to deploy will perform equal to @@ -251,33 +250,41 @@ Minimum Hardware Recommendations Ceph can run on inexpensive commodity hardware. Small production clusters and development clusters can run successfully with modest hardware. -+--------------+----------------+------------------------------------+ -| Process | Criteria | Minimum Recommended | -+==============+================+====================================+ -| ``ceph-osd`` | Processor | 1x 64-bit AMD-64/i386 dual-core | -| +----------------+------------------------------------+ -| | RAM | 500 MB per daemon | -| +----------------+------------------------------------+ -| | Volume Storage | 1x Disk per daemon | -| +----------------+------------------------------------+ -| | Network | 2x 1GB Ethernet NICs | -+--------------+----------------+------------------------------------+ -| ``ceph-mon`` | Processor | 1x 64-bit AMD-64/i386 | -| +----------------+------------------------------------+ -| | RAM | 1 GB per daemon | -| +----------------+------------------------------------+ -| | Disk Space | 10 GB per daemon | -| +----------------+------------------------------------+ -| | Network | 2x 1GB Ethernet NICs | -+--------------+----------------+------------------------------------+ -| ``ceph-mds`` | Processor | 1x 64-bit AMD-64/i386 quad-core | -| 
+----------------+------------------------------------+ -| | RAM | 1 GB minimum per daemon | -| +----------------+------------------------------------+ -| | Disk Space | 1 MB per daemon | -| +----------------+------------------------------------+ -| | Network | 2x 1GB Ethernet NICs | -+--------------+----------------+------------------------------------+ ++--------------+----------------+-----------------------------------------+ +| Process | Criteria | Minimum Recommended | ++==============+================+=========================================+ +| ``ceph-osd`` | Processor | - 1x 64-bit AMD-64 | +| | | - 1x 32-bit ARM dual-core or better | +| | | - 1x i386 dual-core | +| +----------------+-----------------------------------------+ +| | RAM | ~1GB for 1TB of storage per daemon | +| +----------------+-----------------------------------------+ +| | Volume Storage | 1x storage drive per daemon | +| +----------------+-----------------------------------------+ +| | Journal | 1x SSD partition per daemon (optional) | +| +----------------+-----------------------------------------+ +| | Network | 2x 1GB Ethernet NICs | ++--------------+----------------+-----------------------------------------+ +| ``ceph-mon`` | Processor | - 1x 64-bit AMD-64/i386 | +| | | - 1x 32-bit ARM dual-core or better | +| | | - 1x i386 dual-core | +| +----------------+-----------------------------------------+ +| | RAM | 1 GB per daemon | +| +----------------+-----------------------------------------+ +| | Disk Space | 10 GB per daemon | +| +----------------+-----------------------------------------+ +| | Network | 2x 1GB Ethernet NICs | ++--------------+----------------+-----------------------------------------+ +| ``ceph-mds`` | Processor | - 1x 64-bit AMD-64 quad-core | +| | | - 1x 32-bit ARM quad-core | +| | | - 1x i386 quad-core | +| +----------------+-----------------------------------------+ +| | RAM | 1 GB minimum per daemon | +| 
+----------------+-----------------------------------------+ +| | Disk Space | 1 MB per daemon | +| +----------------+-----------------------------------------+ +| | Network | 2x 1GB Ethernet NICs | ++--------------+----------------+-----------------------------------------+ .. tip:: If you are running an OSD with a single disk, create a partition for your volume storage that is separate from the partition @@ -285,13 +292,16 @@ and development clusters can run successfully with modest hardware. OS and the volume storage. -Production Cluster Example -========================== +Production Cluster Examples +=========================== Production clusters for petabyte scale data storage may also use commodity hardware, but should have considerably more memory, processing power and data storage to account for heavy traffic loads. +Dell Example +------------ + A recent (2012) Ceph cluster project is using two fairly robust hardware configurations for Ceph OSDs, and a lighter configuration for monitors. @@ -326,6 +336,43 @@ configurations for Ceph OSDs, and a lighter configuration for monitors. +----------------+----------------+------------------------------------+ +Calxeda Example +--------------- + +A recent (2013) Ceph cluster project uses ARM hardware to obtain low +power consumption and high storage density. 
+ ++----------------+----------------+----------------------------------------+ +| Configuration | Criteria | Minimum Recommended | ++================+================+========================================+ +| SuperMicro | Processor Card | 3x Calxeda EnergyCard building blocks | +| SC 847 Chassis +----------------+----------------------------------------+ +| 4U | CPU | 4x ECX-1000 ARM 1.4 GHz SoC per card | +| +----------------+----------------------------------------+ +| | RAM | 4 GB per System-on-a-chip (SoC) | +| +----------------+----------------------------------------+ +| | Volume Storage | 36x 3TB Seagate Barracuda SATA | +| +----------------+----------------------------------------+ +| | Client Network | 1x 10GB Ethernet NICs | +| +----------------+----------------------------------------+ +| | OSD Network | 1x 10GB Ethernet NICs | +| +----------------+----------------------------------------+ +| | Mgmt. Network | 1x 1GB Ethernet NICs | ++----------------+----------------+----------------------------------------+ + +The chassis configuration enables the deployment of 36 Ceph OSD Daemons per +chassis, one for each 3TB drive. Each System-on-a-chip (SoC) processor runs 3 +Ceph OSD Daemons. Four SoC processors per card allows the 12 processors to run +36 Ceph OSD Daemons with capacity remaining for rebalancing, backfilling and +recovery. This configuration provides 108TB of storage (slightly less after full +ratio settings) per 4U chassis. Using a chassis exclusively for Ceph OSD Daemons +makes it easy to expand the cluster's storage capacity significantly with +relative ease. + +**Note:** the project uses Ceph for cold storage, so there are no SSDs +for journals. + + .. _Ceph Write Throughput 1: http://ceph.com/community/ceph-performance-part-1-disk-controller-write-throughput/ .. _Ceph Write Throughput 2: http://ceph.com/community/ceph-performance-part-2-write-throughput-without-ssd-journals/ .. _Argonaut v. 
Bobtail Performance Preview: http://ceph.com/uncategorized/argonaut-vs-bobtail-performance-preview/ diff --git a/doc/start/index.rst b/doc/start/index.rst index 6e9277746d9b9..f0e80b055ce07 100644 --- a/doc/start/index.rst +++ b/doc/start/index.rst @@ -40,7 +40,6 @@ Ceph Block Devices, the Ceph Filesystem, and Ceph Object Storage. Filesystem Quick Start Object Storage Quick Start - .. raw:: html diff --git a/doc/start/os-recommendations.rst b/doc/start/os-recommendations.rst index d8b418fe1b0d3..31d0943cfcb6c 100644 --- a/doc/start/os-recommendations.rst +++ b/doc/start/os-recommendations.rst @@ -37,6 +37,37 @@ specific distributions aside from the kernel and system initialization package (i.e., sysvinit, upstart, systemd). +Emperor (0.71) +-------------- + ++----------+----------+--------------------+--------------+---------+------------+ +| Distro | Release | Code Name | Kernel | Notes | Testing | ++==========+==========+====================+==============+=========+============+ +| Ubuntu | 12.04 | Precise Pangolin | linux-3.2.0 | 1, 2 | B, I, C | ++----------+----------+--------------------+--------------+---------+------------+ +| Ubuntu | 12.10 | Quantal Quetzal | linux-3.5.4 | 2, 4 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Ubuntu | 13.04 | Raring Ringtail | linux-3.8.5 | 4 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Ubuntu | 13.10 | Saucy Salamander | linux-3.11.2 | 4 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2, 3 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Debian | 7.0 | Wheezy | linux-3.2.0 | 1, 2 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| CentOS | 6.3 | N/A | linux-2.6.32 | 1, 2 | B, I | 
++----------+----------+--------------------+--------------+---------+------------+ +| RHEL | 6.3 | | linux-2.6.32 | 1, 2 | B, I | ++----------+----------+--------------------+--------------+---------+------------+ +| Fedora | 18.0 | Spherical Cow | linux-3.6.0 | | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Fedora | 19.0 | Schrödinger's Cat | linux-3.10.0 | | B | ++----------+----------+--------------------+--------------+---------+------------+ +| OpenSuse | 12.2 | N/A | linux-3.4.0 | 2 | B | ++----------+----------+--------------------+--------------+---------+------------+ + + + Dumpling (0.67) --------------- @@ -158,6 +189,8 @@ Notes ``ceph-osd`` daemons using ``XFS`` or ``ext4`` on the same host will not perform as well as they could. +- **4**: Ceph provides ARM support for Quantal and Raring. Saucy support is + not supported yet, but support is coming soon. Testing ------- diff --git a/doc/start/quick-cephfs.rst b/doc/start/quick-cephfs.rst index 5449e5a6fe328..205298713c4d4 100644 --- a/doc/start/quick-cephfs.rst +++ b/doc/start/quick-cephfs.rst @@ -9,13 +9,21 @@ start on the Admin Host. Prerequisites ============= -Install ``ceph-common``. :: +#. Verify that you have an appropriate version of the Linux kernel. + See `OS Recommendations`_ for details. :: + + lsb_release -a + uname -r - sudo apt-get install ceph-common +#. On the admin node, use ``ceph-deploy`` to install Ceph on your + ``ceph-client`` node. :: -Ensure that the :term:`Ceph Storage Cluster` is running and in an ``active + -clean`` state. Also, ensure that you have at least one :term:`Ceph Metadata -Server` running. :: + ceph-deploy install ceph-client + + +#. Ensure that the :term:`Ceph Storage Cluster` is running and in an ``active + + clean`` state. Also, ensure that you have at least one :term:`Ceph Metadata + Server` running. 
:: ceph -s [-m {monitor-ip-address}] [-k {path/to/ceph.client.admin.keyring}] @@ -94,4 +102,5 @@ if you encounter trouble. .. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _Ceph FS: ../../cephfs/ .. _FAQ: http://wiki.ceph.com/03FAQs/01General_FAQ#How_Can_I_Give_Ceph_a_Try.3F -.. _Troubleshooting: ../../cephfs/troubleshooting \ No newline at end of file +.. _Troubleshooting: ../../cephfs/troubleshooting +.. _OS Recommendations: ../os-recommendations diff --git a/doc/start/quick-rbd.rst b/doc/start/quick-rbd.rst index 9424457f8c2f0..1bd194f49a863 100644 --- a/doc/start/quick-rbd.rst +++ b/doc/start/quick-rbd.rst @@ -27,6 +27,12 @@ Storage Cluster nodes (unless you use a VM). See `FAQ`_ for details. Install Ceph ============ +#. Verify that you have an appropriate version of the Linux kernel. + See `OS Recommendations`_ for details. :: + + lsb_release -a + uname -r + #. On the admin node, use ``ceph-deploy`` to install Ceph on your ``ceph-client`` node. :: @@ -72,3 +78,4 @@ See `block devices`_ for additional details. .. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _block devices: ../../rbd/rbd .. _FAQ: http://wiki.ceph.com/03FAQs/01General_FAQ#How_Can_I_Give_Ceph_a_Try.3F +.. _OS Recommendations: ../os-recommendations diff --git a/doc/start/quick-rgw.rst b/doc/start/quick-rgw.rst index 40cf7d4f4dca2..1de018e0d5c25 100644 --- a/doc/start/quick-rgw.rst +++ b/doc/start/quick-rgw.rst @@ -1,351 +1,11 @@ -============================ - Object Storage Quick Start -============================ - -To use this guide, you must have executed the procedures in the `Storage Cluster -Quick Start`_ guide first. Ensure your :term:`Ceph Storage Cluster` is in an -``active + clean`` state before working with the :term:`Ceph Object Storage`. - -.. note:: Ceph Object Storage is also referred to as RADOS Gateway. 
- - -Install Apache and FastCGI -========================== - -:term:`Ceph Object Storage` runs on Apache and FastCGI in conjunction with the -:term:`Ceph Storage Cluster`. Install Apache and FastCGI on the server node. Use -the following procedure: - -#. Install Apache and FastCGI on the server machine. :: - - sudo apt-get update && sudo apt-get install apache2 libapache2-mod-fastcgi - -#. Enable the URL rewrite modules for Apache and FastCGI. :: - - sudo a2enmod rewrite - sudo a2enmod fastcgi - -#. Add a line for the ``ServerName`` in the Apache configuration file - (e.g., ``/etc/apache2/httpd.conf`` or ``/etc/apache2/apache2.conf``). - Provide the fully qualified domain name of the server machine - (e.g., ``hostname -f``). :: - - ServerName {fqdn} - -#. Restart Apache so that the foregoing changes take effect. :: - - sudo service apache2 restart - - -Install Ceph Object Storage +=========================== + Quick Ceph Object Storage =========================== -Once you have installed and configured Apache and FastCGI, you may install -the Ceph Object Storage daemon (``radosgw``). :: - - sudo apt-get install radosgw - -For details on the preceding steps, see `Ceph Object Storage Manual Install`_. - - -Create a Data Directory -======================= - -Create a data directory on the server node for the instance of ``radosgw``. - -:: - - sudo mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.gateway - - -Modify the Ceph Configuration File -================================== - -On the admin node, perform the following steps: - -#. Open the Ceph configuration file. :: - - vim ceph.conf - -#. Add the following settings to the Ceph configuration file:: - - [client.radosgw.gateway] - host = {host-name} - keyring = /etc/ceph/keyring.radosgw.gateway - rgw socket path = /tmp/radosgw.sock - log file = /var/log/ceph/radosgw.log - - #Add DNS hostname to enable S3 subdomain calls - rgw dns name = {hostname} - -#. 
Use ``ceph-deploy`` to push a copy the configuration file from the admin - node to the server node. :: - - ceph-deploy --overwrite-conf config push {hostname} - - -Create a Gateway Configuration File -=================================== - -The example configuration file will configure the gateway on the server node to -operate with the Apache FastCGI module, a rewrite rule for OpenStack Swift, and -paths for the log files. To add a configuration file for Ceph Object Storage, -we suggest copying the contents of the example file below to an editor. Then, -follow the steps below to modify it (on your server node). - -.. literalinclude:: rgw.conf - :language: ini - -#. Replace the ``{fqdn}`` entry with the fully-qualified domain name of the - server server. - -#. Replace the ``{email.address}`` entry with the email address for the - server administrator. - -#. Add a ``ServerAlias`` if you wish to use S3-style subdomains. - -#. Save the contents to the ``/etc/apache2/sites-available`` directory on - the server machine. - -#. Enable the site for ``rgw.conf``. :: - - sudo a2ensite rgw.conf - -#. Disable the default site. :: - - sudo a2dissite default - -See `Create rgw.conf`_ for additional details. - - -Add a FastCGI Script -==================== - -FastCGI requires a script for the S3-compatible interface. To create the -script, execute the following procedures on the server node. - -#. Go to the ``/var/www`` directory. :: - - cd /var/www - -#. Open an editor with the file name ``s3gw.fcgi``. :: - - sudo vim s3gw.fcgi - -#. Copy the following into the editor. :: - - #!/bin/sh - exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway - -#. Save the file. - -#. Change the permissions on the file so that it is executable. :: - - sudo chmod +x s3gw.fcgi - - -Generate a Keyring and Key -========================== - -Perform the following steps on the server machine. - -#. Ensure the server node is set up with administrator privileges. 
From - the admin node, execute the following:: - - ceph-deploy admin {hostname} - -#. Create a keyring for Ceph Object Storage. :: - - sudo ceph-authtool --create-keyring /etc/ceph/keyring.radosgw.gateway - sudo chmod +r /etc/ceph/keyring.radosgw.gateway - -#. Create a key for Ceph Object Storage to authenticate with the Ceph Storage - Cluster. :: - - sudo ceph-authtool /etc/ceph/keyring.radosgw.gateway -n client.radosgw.gateway --gen-key - sudo ceph-authtool -n client.radosgw.gateway --cap osd 'allow rwx' --cap mon 'allow rw' /etc/ceph/keyring.radosgw.gateway - -#. Add the key to the Ceph keyring. :: - - sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.gateway -i /etc/ceph/keyring.radosgw.gateway - - -Enable SSL -========== - -Some REST clients use HTTPS by default. So you should consider enabling SSL -for Apache on the server machine. :: - - sudo a2enmod ssl - -Once you enable SSL, you should use a trusted SSL certificate. You can -generate a non-trusted SSL certificate using the following:: - - sudo mkdir /etc/apache2/ssl - sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/apache2/ssl/apache.key -out /etc/apache2/ssl/apache.crt - -Then, restart Apache. :: - - service apache2 restart - - -Add Wildcard to DNS -=================== - -To use Ceph with S3-style subdomains (e.g., ``bucket-name.domain-name.com``), -you need to add a wildcard to the DNS record of the DNS server you use with the -``radosgw`` daemon. - -.. tip:: The address of the DNS must also be specified in the Ceph - configuration file with the ``rgw dns name = {hostname}`` setting. - -For ``dnsmasq``, consider addding the following ``address`` setting with a dot -(.) prepended to the host name:: - - address=/.{hostname-or-fqdn}/{host-ip-address} - address=/.ceph-node/192.168.0.1 - -For ``bind``, consider adding the a wildcard to the DNS record:: - - $TTL 604800 - @ IN SOA ceph-node. root.ceph-node. 
( - 2 ; Serial - 604800 ; Refresh - 86400 ; Retry - 2419200 ; Expire - 604800 ) ; Negative Cache TTL - ; - @ IN NS ceph-node. - @ IN A 192.168.122.113 - * IN CNAME @ - -Restart your DNS server and ping your server with a subdomain to -ensure that your Ceph Object Store ``radosgw`` daemon can process -the subdomain requests. :: - - ping mybucket.{fqdn} - ping mybucket.ceph-node - - -Restart Services -================ - -To ensure that all components have reloaded their configurations, -we recommend restarting your ``ceph`` and ``apaches`` services. Then, -start up the ``radosgw`` service. For example:: - - sudo service ceph restart - sudo service apache2 restart - sudo /etc/init.d/radosgw start - - -Create a User -============= - -To use the Gateway, you must create a Gateway user. First, create a gateway user -for the S3-compatible interface; then, create a subuser for the -Swift-compatible interface. - -Gateway (S3) User ------------------ - -First, create a Gateway user for the S3-compatible interface. :: - - sudo radosgw-admin user create --uid="{username}" --display-name="{Display Name}" - -For example:: - - radosgw-admin user create --uid=johndoe --display-name="John Doe" --email=john@example.com - -.. code-block:: javascript - - { "user_id": "johndoe", - "rados_uid": 0, - "display_name": "John Doe", - "email": "john@example.com", - "suspended": 0, - "subusers": [], - "keys": [ - { "user": "johndoe", - "access_key": "QFAMEDSJP5DEKJO0DDXY", - "secret_key": "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87"}], - "swift_keys": [] - } - -Creating a user creates an access_key and secret_key entry for use with any S3 -API-compatible client. - -.. important:: Check the key output. Sometimes radosgw-admin generates a key - with an escape (\) character, and some clients do not know how to handle - escape characters. 
Remedies include removing the escape character (\), - encapsulating the string in quotes, or simply regenerating the key and - ensuring that it does not have an escape character. - -Subuser -------- - -Next, create a subuser for the Swift-compatible interface. :: - - sudo radosgw-admin subuser create --uid=johndoe --subuser=johndoe:swift --access=full - -.. code-block:: javascript - - { "user_id": "johndoe", - "rados_uid": 0, - "display_name": "John Doe", - "email": "john@example.com", - "suspended": 0, - "subusers": [ - { "id": "johndoe:swift", - "permissions": "full-control"}], - "keys": [ - { "user": "johndoe", - "access_key": "QFAMEDSJP5DEKJO0DDXY", - "secret_key": "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87"}], - "swift_keys": []} - -:: - - sudo radosgw-admin key create --subuser=johndoe:swift --key-type=swift --gen-secret - -.. code-block:: javascript - - { "user_id": "johndoe", - "rados_uid": 0, - "display_name": "John Doe", - "email": "john@example.com", - "suspended": 0, - "subusers": [ - { "id": "johndoe:swift", - "permissions": "full-control"}], - "keys": [ - { "user": "johndoe", - "access_key": "QFAMEDSJP5DEKJO0DDXY", - "secret_key": "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87"}], - "swift_keys": [ - { "user": "johndoe:swift", - "secret_key": "E9T2rUZNu2gxUjcwUBO8n\/Ev4KX6\/GprEuH4qhu1"}]} - -This step enables you to use any Swift client to connect to and use RADOS -Gateway via the Swift-compatible API. - -RGW's ``user:subuser`` tuple maps to the ``tenant:user`` tuple expected by Swift. - -.. note:: RGW's Swift authentication service only supports - built-in Swift authentication (``-V 1.0``) at this point. See - `RGW Configuration`_ for Keystone integration details. - - -Summary -------- - -Once you have completed this Quick Start, you may use the Ceph Object Store -tutorials. See the `S3-compatible`_ and `Swift-compatible`_ APIs for details. - +At this time, ``ceph-deploy`` does not provide a rapid installation for +:term:`Ceph Object Storage`. 
To install a :term:`Ceph Object Gateway`, +see `Install Ceph Object Gateway`_. To configure a Ceph Object Gateway, +see `Configuring Ceph Object Gateway`_. -.. _Create rgw.conf: ../../radosgw/config/index.html#create-rgw-conf -.. _Storage Cluster Quick Start: ../quick-ceph-deploy -.. _Ceph Object Storage Manual Install: ../../radosgw/manual-install -.. _RGW Configuration: ../../radosgw/config -.. _S3-compatible: ../../radosgw/s3 -.. _Swift-compatible: ../../radosgw/swift +.. _Install Ceph Object Gateway: ../../install/install-ceph-gateway +.. _Configuring Ceph Object Gateway: ../../radosgw/config \ No newline at end of file diff --git a/doc/start/quick-start-preflight.rst b/doc/start/quick-start-preflight.rst index 77a54795f1948..eb4bf6e430497 100644 --- a/doc/start/quick-start-preflight.rst +++ b/doc/start/quick-start-preflight.rst @@ -118,8 +118,8 @@ For Debian and Ubuntu distributions, perform the following steps: Red Hat Package Manager (RPM) ----------------------------- -For Red Hat(rhel6), CentOS (el6), Fedora 17-19 (f17-f19), OpenSUSE 12 -(opensuse12), and SLES (sles11) perform the following steps: +For Red Hat(rhel6), CentOS (el6), and Fedora 17-19 (f17-f19) perform the +following steps: #. Add the package to your repository. Open a text editor and create a Yellowdog Updater, Modified (YUM) entry. 
Use the file path diff --git a/m4/ax_c_pretty_func.m4 b/m4/ax_c_pretty_func.m4 new file mode 100644 index 0000000000000..ad76709c33a54 --- /dev/null +++ b/m4/ax_c_pretty_func.m4 @@ -0,0 +1,18 @@ +# +# Test for C compiler support of __PRETTY_FUNCTION__ +# +# - Adapted from ax_c_var_func (Noah Watkins) +# + +AU_ALIAS([AC_C_PRETTY_FUNC], [AX_C_PRETTY_FUNC]) +AC_DEFUN([AX_C_PRETTY_FUNC], +[AC_REQUIRE([AC_PROG_CC]) +AC_CACHE_CHECK(whether $CC recognizes __PRETTY_FUNCTION__, ac_cv_c_pretty_func, +AC_TRY_COMPILE(, +[ +char *s = __PRETTY_FUNCTION__; +], +AC_DEFINE(HAVE_PRETTY_FUNC,, +[Define if the C complier supports __PRETTY_FUNCTION__]) ac_cv_c_pretty_func=yes, +ac_cv_c_pretty_func=no) ) +])dnl diff --git a/m4/ax_c_var_func.m4 b/m4/ax_c_var_func.m4 new file mode 100644 index 0000000000000..8b57563621d8f --- /dev/null +++ b/m4/ax_c_var_func.m4 @@ -0,0 +1,66 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_c_var_func.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_C_VAR_FUNC +# +# DESCRIPTION +# +# This macro tests if the C complier supports the C9X standard __func__ +# indentifier. +# +# The new C9X standard for the C language stipulates that the identifier +# __func__ shall be implictly declared by the compiler as if, immediately +# following the opening brace of each function definition, the declaration +# +# static const char __func__[] = "function-name"; +# +# appeared, where function-name is the name of the function where the +# __func__ identifier is used. +# +# LICENSE +# +# Copyright (c) 2008 Christopher Currie +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 2 of the License, or (at your +# option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
+ +#serial 5 + +AU_ALIAS([AC_C_VAR_FUNC], [AX_C_VAR_FUNC]) +AC_DEFUN([AX_C_VAR_FUNC], +[AC_REQUIRE([AC_PROG_CC]) +AC_CACHE_CHECK(whether $CC recognizes __func__, ac_cv_c_var_func, +AC_TRY_COMPILE(, +[ +char *s = __func__; +], +AC_DEFINE(HAVE_FUNC,, +[Define if the C complier supports __func__]) ac_cv_c_var_func=yes, +ac_cv_c_var_func=no) ) +])dnl diff --git a/m4/ax_cxx_static_cast.m4 b/m4/ax_cxx_static_cast.m4 new file mode 100644 index 0000000000000..e09e6c46cb2da --- /dev/null +++ b/m4/ax_cxx_static_cast.m4 @@ -0,0 +1,43 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_cxx_static_cast.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CXX_STATIC_CAST +# +# DESCRIPTION +# +# If the compiler supports static_cast<>, define HAVE_STATIC_CAST. +# +# LICENSE +# +# Copyright (c) 2008 Todd Veldhuizen +# Copyright (c) 2008 Luc Maisonobe +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 6 + +AU_ALIAS([AC_CXX_STATIC_CAST], [AX_CXX_STATIC_CAST]) +AC_DEFUN([AX_CXX_STATIC_CAST], +[AC_CACHE_CHECK(whether the compiler supports static_cast<>, +ax_cv_cxx_static_cast, +[AC_LANG_SAVE + AC_LANG_CPLUSPLUS + AC_TRY_COMPILE([#include +class Base { public : Base () {} virtual void f () = 0; }; +class Derived : public Base { public : Derived () {} virtual void f () {} }; +int g (Derived&) { return 0; }],[ +Derived d; Base& b = d; Derived& s = static_cast (b); return g (s);], + ax_cv_cxx_static_cast=yes, ax_cv_cxx_static_cast=no) + AC_LANG_RESTORE +]) +if test "$ax_cv_cxx_static_cast" = yes; then + AC_DEFINE(HAVE_STATIC_CAST,, + [define if the compiler supports static_cast<>]) +fi +]) diff --git a/qa/run_xfstests.sh b/qa/run_xfstests.sh index f9c3e55a79d60..3f5e2eca9f507 100644 --- a/qa/run_xfstests.sh +++ b/qa/run_xfstests.sh @@ -48,7 +48,7 @@ XFS_MKFS_OPTIONS="-l su=32k" # Override the default test list with a list of tests known to pass # until we can work through getting them all passing reliably. -TESTS="1-9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76" +TESTS="1-7 9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76" TESTS="${TESTS} 78 79 84-89 91-92 100 103 105 108 110 116-121 124 126" TESTS="${TESTS} 129-135 137-141 164-167 182 184 187-190 192 194" TESTS="${TESTS} 196 199 201 203 214-216 220-227 234 236-238 241 243-249" @@ -59,6 +59,9 @@ TESTS="${TESTS} 253 257-259 261 262 269 273 275 277 278 280 285 286" ###### # Some explanation of why tests have been excluded above: # +# Test 008 was pulled because it contained a race condition leading to +# spurious failures. +# # Test 049 was pulled because it caused a kernel fault. 
# http://tracker.newdream.net/issues/2260 # Test 232 was pulled because it caused an XFS error diff --git a/qa/run_xfstests_qemu.sh b/qa/run_xfstests_qemu.sh index 919e46a4d59fc..9dcced7edab38 100644 --- a/qa/run_xfstests_qemu.sh +++ b/qa/run_xfstests_qemu.sh @@ -7,4 +7,4 @@ chmod +x run_xfstests.sh # tests excluded fail in the current testing vm regardless of whether # rbd is used -./run_xfstests.sh -c 1 -f xfs -t /dev/vdb -s /dev/vdc 1-17 19-26 28-49 51-61 63 66-67 69-79 83 85-105 108-110 112-135 137-170 174-191 193-204 206-217 220-227 230-231 233 235-241 243-249 251-262 264-278 281-286 288-289 +./run_xfstests.sh -c 1 -f xfs -t /dev/vdb -s /dev/vdc 1-7 9-17 19-26 28-49 51-61 63 66-67 69-79 83 85-105 108-110 112-135 137-170 174-191 193-204 206-217 220-227 230-231 233 235-241 243-249 251-262 264-278 281-286 288-289 diff --git a/src/auth/AuthAuthorizeHandler.h b/src/auth/AuthAuthorizeHandler.h index 10803d0d7a005..7b67876e44be3 100644 --- a/src/auth/AuthAuthorizeHandler.h +++ b/src/auth/AuthAuthorizeHandler.h @@ -41,12 +41,11 @@ struct AuthAuthorizeHandler { class AuthAuthorizeHandlerRegistry { Mutex m_lock; map m_authorizers; - CephContext *cct; AuthMethodList supported; public: AuthAuthorizeHandlerRegistry(CephContext *cct_, std::string methods) - : m_lock("AuthAuthorizeHandlerRegistry::m_lock"), cct(cct_), supported(cct_, methods) + : m_lock("AuthAuthorizeHandlerRegistry::m_lock"), supported(cct_, methods) {} ~AuthAuthorizeHandlerRegistry(); diff --git a/src/auth/AuthMethodList.cc b/src/auth/AuthMethodList.cc index 50d58bff4d496..0e507fbf43f90 100644 --- a/src/auth/AuthMethodList.cc +++ b/src/auth/AuthMethodList.cc @@ -12,6 +12,8 @@ * */ +#include + #include "common/Mutex.h" #include "common/config.h" #include "common/debug.h" diff --git a/src/auth/KeyRing.cc b/src/auth/KeyRing.cc index 5a8b2288e6aa4..96e56cd487e21 100644 --- a/src/auth/KeyRing.cc +++ b/src/auth/KeyRing.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include "auth/AuthMethodList.h" 
#include "auth/Crypto.h" diff --git a/src/client/Client.cc b/src/client/Client.cc index 89de94ee6ea5b..a4d5550849519 100644 --- a/src/client/Client.cc +++ b/src/client/Client.cc @@ -84,6 +84,7 @@ using namespace std; #include "ObjecterWriteback.h" #include "include/assert.h" +#include "include/stat.h" #undef dout_prefix #define dout_prefix *_dout << "client." << whoami << " " @@ -109,7 +110,6 @@ Client::CommandHook::CommandHook(Client *client) : bool Client::CommandHook::call(std::string command, cmdmap_t& cmdmap, std::string format, bufferlist& out) { - stringstream ss; Formatter *f = new_formatter(format); f->open_object_section("result"); m_client->client_lock.Lock(); @@ -4503,9 +4503,9 @@ int Client::_setattr(Inode *in, struct stat *attr, int mask, int uid, int gid, I if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL)) { if (mask & (CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME)) { if (mask & CEPH_SETATTR_MTIME) - in->mtime = utime_t(attr->st_mtim.tv_sec, attr->st_mtim.tv_nsec); + in->mtime = utime_t(stat_get_mtime_sec(attr), stat_get_mtime_nsec(attr)); if (mask & CEPH_SETATTR_ATIME) - in->atime = utime_t(attr->st_atim.tv_sec, attr->st_atim.tv_nsec); + in->atime = utime_t(stat_get_atime_sec(attr), stat_get_atime_nsec(attr)); in->ctime = ceph_clock_now(cct); in->time_warp_seq++; mark_caps_dirty(in, CEPH_CAP_FILE_EXCL); @@ -4535,14 +4535,14 @@ int Client::_setattr(Inode *in, struct stat *attr, int mask, int uid, int gid, I req->inode_drop |= CEPH_CAP_AUTH_SHARED; } if (mask & CEPH_SETATTR_MTIME) { - req->head.args.setattr.mtime = - utime_t(attr->st_mtim.tv_sec, attr->st_mtim.tv_nsec); + utime_t mtime = utime_t(stat_get_mtime_sec(attr), stat_get_mtime_nsec(attr)); + req->head.args.setattr.mtime = mtime; req->inode_drop |= CEPH_CAP_AUTH_SHARED | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR; } if (mask & CEPH_SETATTR_ATIME) { - req->head.args.setattr.atime = - utime_t(attr->st_atim.tv_sec, attr->st_atim.tv_nsec); + utime_t atime = utime_t(stat_get_atime_sec(attr), 
stat_get_atime_nsec(attr)); + req->head.args.setattr.atime = atime; req->inode_drop |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR; } @@ -4652,16 +4652,16 @@ int Client::fill_stat(Inode *in, struct stat *st, frag_info_t *dirstat, nest_inf st->st_uid = in->uid; st->st_gid = in->gid; if (in->ctime.sec() > in->mtime.sec()) { - st->st_ctim.tv_sec = in->ctime.sec(); - st->st_ctim.tv_nsec = in->ctime.nsec(); + stat_set_ctime_sec(st, in->ctime.sec()); + stat_set_ctime_nsec(st, in->ctime.nsec()); } else { - st->st_ctim.tv_sec = in->mtime.sec(); - st->st_ctim.tv_nsec = in->mtime.nsec(); + stat_set_ctime_sec(st, in->mtime.sec()); + stat_set_ctime_nsec(st, in->mtime.nsec()); } - st->st_atim.tv_sec = in->atime.sec(); - st->st_atim.tv_nsec = in->atime.nsec(); - st->st_mtim.tv_sec = in->mtime.sec(); - st->st_mtim.tv_nsec = in->mtime.nsec(); + stat_set_atime_sec(st, in->atime.sec()); + stat_set_atime_nsec(st, in->atime.nsec()); + stat_set_mtime_sec(st, in->mtime.sec()); + stat_set_mtime_nsec(st, in->mtime.nsec()); if (in->is_dir()) { //st->st_size = in->dirstat.size(); st->st_size = in->rstat.rbytes; @@ -4807,10 +4807,10 @@ int Client::utime(const char *relpath, struct utimbuf *buf) if (r < 0) return r; struct stat attr; - attr.st_mtim.tv_sec = buf->modtime; - attr.st_mtim.tv_nsec = 0; - attr.st_atim.tv_sec = buf->actime; - attr.st_atim.tv_nsec = 0; + stat_set_mtime_sec(&attr, buf->modtime); + stat_set_mtime_nsec(&attr, 0); + stat_set_atime_sec(&attr, buf->actime); + stat_set_atime_nsec(&attr, 0); return _setattr(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME); } @@ -4828,10 +4828,10 @@ int Client::lutime(const char *relpath, struct utimbuf *buf) if (r < 0) return r; struct stat attr; - attr.st_mtim.tv_sec = buf->modtime; - attr.st_mtim.tv_nsec = 0; - attr.st_atim.tv_sec = buf->actime; - attr.st_atim.tv_nsec = 0; + stat_set_mtime_sec(&attr, buf->modtime); + stat_set_mtime_nsec(&attr, 0); + stat_set_atime_sec(&attr, buf->actime); + stat_set_atime_nsec(&attr, 0); 
return _setattr(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME); } diff --git a/src/common/PrioritizedQueue.h b/src/common/PrioritizedQueue.h index 6dcb519da400e..e663f277693ea 100644 --- a/src/common/PrioritizedQueue.h +++ b/src/common/PrioritizedQueue.h @@ -92,7 +92,7 @@ class PrioritizedQueue { SubQueue() : tokens(0), max_tokens(0), - size(0) {} + size(0), cur(q.begin()) {} void set_max_tokens(unsigned mt) { max_tokens = mt; } diff --git a/src/common/blkdev.cc b/src/common/blkdev.cc index b0dc0a54e9e2f..9c7240c0aac6c 100644 --- a/src/common/blkdev.cc +++ b/src/common/blkdev.cc @@ -1,40 +1,56 @@ -#include "include/int_types.h" - -#include -#include #include #include -#include -#include -#include - -#include "acconfig.h" -#include "include/compat.h" +#include +#include "include/int_types.h" -#if defined(__FreeBSD__) -#include -#endif +#ifdef __linux__ +#include int get_block_device_size(int fd, int64_t *psize) { - int ret = 0; - -#if defined(__FreeBSD__) - ret = ::ioctl(fd, DIOCGMEDIASIZE, psize); -#elif defined(__linux__) #ifdef BLKGETSIZE64 - // ioctl block device - ret = ::ioctl(fd, BLKGETSIZE64, psize); + int ret = ::ioctl(fd, BLKGETSIZE64, psize); #elif BLKGETSIZE - // hrm, try the 32 bit ioctl? unsigned long sectors = 0; - ret = ::ioctl(fd, BLKGETSIZE, §ors); + int ret = ::ioctl(fd, BLKGETSIZE, §ors); *psize = sectors * 512ULL; -#endif #else -#error "Compile error: we don't know how to get the size of a raw block device." 
-#endif /* !__FreeBSD__ */ +# error "Linux configuration error (get_block_device_size)" +#endif if (ret < 0) ret = -errno; return ret; } + +#elif defined(__APPLE__) +#include + +int get_block_device_size(int fd, int64_t *psize) +{ + unsigned long blocksize = 0; + int ret = ::ioctl(fd, DKIOCGETBLOCKSIZE, &blocksize); + if (!ret) { + unsigned long nblocks; + ret = ::ioctl(fd, DKIOCGETBLOCKCOUNT, &nblocks); + if (!ret) + *psize = (int64_t)nblocks * blocksize; + } + if (ret < 0) + ret = -errno; + return ret; +} + +#elif defined(__FreeBSD__) +#include + +int get_block_device_size(int fd, int64_t *psize) +{ + int ret = ::ioctl(fd, DIOCGMEDIASIZE, psize); + if (ret < 0) + ret = -errno; + return ret; +} + +#else +# error "Unable to query block device size: unsupported platform, please report." +#endif diff --git a/src/common/buffer.cc b/src/common/buffer.cc index acdfee0885b5d..819d767d006ff 100644 --- a/src/common/buffer.cc +++ b/src/common/buffer.cc @@ -854,7 +854,7 @@ void buffer::list::rebuild_page_aligned() (!p->is_page_aligned() || !p->is_n_page_sized() || (offset & ~CEPH_PAGE_MASK))); - ptr nb(buffer::create_page_aligned(_len)); + ptr nb(buffer::create_page_aligned(unaligned._len)); unaligned.rebuild(nb); _buffers.insert(p, unaligned._buffers.front()); } @@ -1045,7 +1045,7 @@ void buffer::list::rebuild_page_aligned() return 0; // no buffers std::list::const_iterator iter = _buffers.begin(); - iter++; + ++iter; if (iter != _buffers.end()) rebuild(); diff --git a/src/common/hobject.h b/src/common/hobject.h index edaf04a301fd6..3c2202df4b1b5 100644 --- a/src/common/hobject.h +++ b/src/common/hobject.h @@ -86,6 +86,11 @@ struct hobject_t { return ret; } + /// @return true if object is snapdir + bool is_snapdir() const { + return snap == CEPH_SNAPDIR; + } + /// @return snapdir version of this hobject_t hobject_t get_snapdir() const { hobject_t ret(*this); @@ -184,7 +189,7 @@ struct hobject_t { friend bool operator>=(const hobject_t&, const hobject_t&); friend bool 
operator==(const hobject_t&, const hobject_t&); friend bool operator!=(const hobject_t&, const hobject_t&); - friend class ghobject_t; + friend struct ghobject_t; }; WRITE_CLASS_ENCODER(hobject_t) diff --git a/src/common/pipe.c b/src/common/pipe.c index 9c01b322f89dc..9ec9e36e586ba 100644 --- a/src/common/pipe.c +++ b/src/common/pipe.c @@ -11,6 +11,7 @@ * Foundation. See file COPYING. * */ +#include "acconfig.h" #include "common/pipe.h" @@ -20,24 +21,40 @@ int pipe_cloexec(int pipefd[2]) { -#if defined(O_CLOEXEC) && !defined(__FreeBSD__) int ret; + +#if defined(HAVE_PIPE2) && defined(O_CLOEXEC) ret = pipe2(pipefd, O_CLOEXEC); - if (ret) { - ret = -errno; - return ret; - } + if (ret == -1) + return -errno; return 0; #else - /* The old-fashioned, race-condition prone way that we have to fall back on if - * O_CLOEXEC does not exist. */ - int ret = pipe(pipefd); - if (ret) { + ret = pipe(pipefd); + if (ret == -1) + return -errno; + + /* + * The old-fashioned, race-condition prone way that we have to fall + * back on if O_CLOEXEC does not exist. 
+ */ + ret = fcntl(pipefd[0], F_SETFD, FD_CLOEXEC); + if (ret == -1) { + ret = -errno; + goto out; + } + + ret = fcntl(pipefd[1], F_SETFD, FD_CLOEXEC); + if (ret == -1) { ret = -errno; - return ret; + goto out; } - fcntl(pipefd[0], F_SETFD, FD_CLOEXEC); - fcntl(pipefd[1], F_SETFD, FD_CLOEXEC); + return 0; + +out: + TEMP_FAILURE_RETRY(close(pipefd[0])); + TEMP_FAILURE_RETRY(close(pipefd[1])); + + return ret; #endif } diff --git a/src/common/sharedptr_registry.hpp b/src/common/sharedptr_registry.hpp index 90043001ee7f8..83396b8cc5f67 100644 --- a/src/common/sharedptr_registry.hpp +++ b/src/common/sharedptr_registry.hpp @@ -33,7 +33,7 @@ class SharedPtrRegistry { private: Mutex lock; Cond cond; - map contents; + map > contents; class OnRemoval { SharedPtrRegistry *parent; @@ -44,8 +44,13 @@ class SharedPtrRegistry { void operator()(V *to_remove) { { Mutex::Locker l(parent->lock); - parent->contents.erase(key); - parent->cond.Signal(); + typename map >::iterator i = + parent->contents.find(key); + if (i != parent->contents.end() && + i->second.second == to_remove) { + parent->contents.erase(i); + parent->cond.Signal(); + } } delete to_remove; } @@ -68,9 +73,10 @@ class SharedPtrRegistry { { Mutex::Locker l(lock); VPtr next_val; - typename map::iterator i = contents.upper_bound(key); + typename map >::iterator i = + contents.upper_bound(key); while (i != contents.end() && - !(next_val = i->second.lock())) + !(next_val = i->second.first.lock())) ++i; if (i == contents.end()) return false; @@ -86,9 +92,10 @@ class SharedPtrRegistry { bool get_next(const K &key, pair *next) { VPtr next_val; Mutex::Locker l(lock); - typename map::iterator i = contents.upper_bound(key); + typename map >::iterator i = + contents.upper_bound(key); while (i != contents.end() && - !(next_val = i->second.lock())) + !(next_val = i->second.first.lock())) ++i; if (i == contents.end()) return false; @@ -101,8 +108,10 @@ class SharedPtrRegistry { Mutex::Locker l(lock); waiting++; while (1) { - if 
(contents.count(key)) { - VPtr retval = contents[key].lock(); + typename map >::iterator i = + contents.find(key); + if (i != contents.end()) { + VPtr retval = i->second.first.lock(); if (retval) { waiting--; return retval; @@ -120,8 +129,10 @@ class SharedPtrRegistry { Mutex::Locker l(lock); waiting++; while (1) { - if (contents.count(key)) { - VPtr retval = contents[key].lock(); + typename map >::iterator i = + contents.find(key); + if (i != contents.end()) { + VPtr retval = i->second.first.lock(); if (retval) { waiting--; return retval; @@ -131,12 +142,18 @@ class SharedPtrRegistry { } cond.Wait(lock); } - VPtr retval(new V(), OnRemoval(this, key)); - contents[key] = retval; + V *ptr = new V(); + VPtr retval(ptr, OnRemoval(this, key)); + contents.insert(make_pair(key, make_pair(retval, ptr))); waiting--; return retval; } + unsigned size() { + Mutex::Locker l(lock); + return contents.size(); + } + void remove(const K &key) { Mutex::Locker l(lock); contents.erase(key); @@ -148,8 +165,10 @@ class SharedPtrRegistry { Mutex::Locker l(lock); waiting++; while (1) { - if (contents.count(key)) { - VPtr retval = contents[key].lock(); + typename map >::iterator i = + contents.find(key); + if (i != contents.end()) { + VPtr retval = i->second.first.lock(); if (retval) { waiting--; return retval; @@ -159,8 +178,9 @@ class SharedPtrRegistry { } cond.Wait(lock); } - VPtr retval(new V(arg), OnRemoval(this, key)); - contents[key] = retval; + V *ptr = new V(arg); + VPtr retval(ptr, OnRemoval(this, key)); + contents.insert(make_pair(key, make_pair(retval, ptr))); waiting--; return retval; } diff --git a/src/crush/CrushTester.cc b/src/crush/CrushTester.cc index 0a649d8c77c0c..b38386d017962 100644 --- a/src/crush/CrushTester.cc +++ b/src/crush/CrushTester.cc @@ -1,6 +1,7 @@ #include "CrushTester.h" +#include #include @@ -203,7 +204,7 @@ bool CrushTester::check_valid_placement(int ruleno, vector in, const vector // check that we don't have any duplicate id's for (vector::iterator it = 
included_devices.begin(); it != included_devices.end(); ++it) { - int num_copies = count(included_devices.begin(), included_devices.end(), (*it) ); + int num_copies = std::count(included_devices.begin(), included_devices.end(), (*it) ); if (num_copies > 1) { valid_placement = false; } diff --git a/src/crush/CrushTester.h b/src/crush/CrushTester.h index 0cbab0fd98806..7a681e253dbff 100644 --- a/src/crush/CrushTester.h +++ b/src/crush/CrushTester.h @@ -12,7 +12,6 @@ class CrushTester { CrushWrapper& crush; ostream& err; - int verbose; map device_weight; int min_rule, max_rule; @@ -165,8 +164,8 @@ class CrushTester { void write_integer_indexed_scalar_data_string(vector &dst, int index, float scalar_data); public: - CrushTester(CrushWrapper& c, ostream& eo, int verbosity=0) - : crush(c), err(eo), verbose(verbosity), + CrushTester(CrushWrapper& c, ostream& eo) + : crush(c), err(eo), min_rule(-1), max_rule(-1), min_x(-1), max_x(-1), min_rep(-1), max_rep(-1), diff --git a/src/include/Makefile.am b/src/include/Makefile.am index 34976a6cc2990..6e076600e273c 100644 --- a/src/include/Makefile.am +++ b/src/include/Makefile.am @@ -78,4 +78,5 @@ noinst_HEADERS += \ include/rbd/features.h \ include/rbd/librbd.h \ include/rbd/librbd.hpp\ - include/util.h + include/util.h\ + include/stat.h diff --git a/src/include/assert.h b/src/include/assert.h index 38c0eeb44a1ea..5ff41ba69d7a3 100644 --- a/src/include/assert.h +++ b/src/include/assert.h @@ -27,27 +27,44 @@ struct FailedAssertion { #endif -#if defined __cplusplus && __GNUC_PREREQ (2,95) +#ifdef HAVE_STATIC_CAST # define __CEPH_ASSERT_VOID_CAST static_cast #else # define __CEPH_ASSERT_VOID_CAST (void) #endif -/* Version 2.4 and later of GCC define a magical variable `__PRETTY_FUNCTION__' - which contains the name of the function currently being defined. - This is broken in G++ before version 2.6. - C9x has a similar variable called __func__, but prefer the GCC one since - it demangles C++ function names. 
*/ -# if defined __cplusplus ? __GNUC_PREREQ (2, 6) : __GNUC_PREREQ (2, 4) -# define __CEPH_ASSERT_FUNCTION __PRETTY_FUNCTION__ -# else -# if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L -# define __CEPH_ASSERT_FUNCTION __func__ -# else -# define __CEPH_ASSERT_FUNCTION ((__const char *) 0) -# endif +/* + * For GNU, test specific version features. Otherwise (e.g. LLVM) we'll use + * the defaults selected below. + */ +#ifdef __GNUC_PREREQ + +/* + * Version 2.4 and later of GCC define a magical variable + * `__PRETTY_FUNCTION__' which contains the name of the function currently + * being defined. This is broken in G++ before version 2.6. C9x has a + * similar variable called __func__, but prefer the GCC one since it demangles + * C++ function names. We define __CEPH_NO_PRETTY_FUNC if we want to avoid + * broken versions of G++. + */ +# if defined __cplusplus ? !__GNUC_PREREQ (2, 6) : !__GNUC_PREREQ (2, 4) +# define __CEPH_NO_PRETTY_FUNC # endif +#endif + +/* + * Select a function-name variable based on compiler tests, and any compiler + * specific overrides. 
+ */ +#if defined(HAVE_PRETTY_FUNC) && !defined(__CEPH_NO_PRETTY_FUNC) +# define __CEPH_ASSERT_FUNCTION __PRETTY_FUNCTION__ +#elif defined(HAVE_FUNC) +# define __CEPH_ASSERT_FUNCTION __func__ +#else +# define __CEPH_ASSERT_FUNCTION ((__const char *) 0) +#endif + extern void register_assert_context(CephContext *cct); extern void __ceph_assert_fail(const char *assertion, const char *file, int line, const char *function) __attribute__ ((__noreturn__)); diff --git a/src/include/cephfs/libcephfs.h b/src/include/cephfs/libcephfs.h index 2f69ba7a3b91a..9b74f63c062d3 100644 --- a/src/include/cephfs/libcephfs.h +++ b/src/include/cephfs/libcephfs.h @@ -38,9 +38,9 @@ extern "C" { # error libceph: must define __USE_FILE_OFFSET64 or readdir results will be corrupted #endif -class ceph_mount_info; +struct ceph_mount_info; struct ceph_dir_result; -class CephContext; +struct CephContext; /* setattr mask bits */ #ifndef CEPH_SETATTR_MODE @@ -82,7 +82,7 @@ const char *ceph_version(int *major, int *minor, int *patch); * pass in NULL, and the id will be the process id of the client. * @returns 0 on success, negative error code on failure */ -int ceph_create(class ceph_mount_info **cmount, const char * const id); +int ceph_create(struct ceph_mount_info **cmount, const char * const id); /** * Create a mount handle from a CephContext, which holds the configuration @@ -94,7 +94,7 @@ int ceph_create(class ceph_mount_info **cmount, const char * const id); * @param conf reuse this pre-existing CephContext config * @returns 0 on success, negative error code on failure */ -int ceph_create_with_context(class ceph_mount_info **cmount, class CephContext *conf); +int ceph_create_with_context(struct ceph_mount_info **cmount, struct CephContext *conf); /** * Perform a mount using the path for the root of the mount. @@ -105,7 +105,7 @@ int ceph_create_with_context(class ceph_mount_info **cmount, class CephContext * * be "/". Passing in NULL is equivalent to "/". 
* @returns 0 on success, negative error code on failure */ -int ceph_mount(class ceph_mount_info *cmount, const char *root); +int ceph_mount(struct ceph_mount_info *cmount, const char *root); /** * Unmount a mount handle. @@ -113,7 +113,7 @@ int ceph_mount(class ceph_mount_info *cmount, const char *root); * @param cmount the mount handle * @return 0 on success, negative error code on failure */ -int ceph_unmount(class ceph_mount_info *cmount); +int ceph_unmount(struct ceph_mount_info *cmount); /** * Destroy the mount handle. @@ -124,7 +124,7 @@ int ceph_unmount(class ceph_mount_info *cmount); * @param cmount the mount handle * @return 0 on success, negative error code on failure. */ -int ceph_release(class ceph_mount_info *cmount); +int ceph_release(struct ceph_mount_info *cmount); /** * Deprecated. Unmount and destroy the ceph mount handle. This should be @@ -134,7 +134,7 @@ int ceph_release(class ceph_mount_info *cmount); * * @param cmount the mount handle to shutdown */ -void ceph_shutdown(class ceph_mount_info *cmount); +void ceph_shutdown(struct ceph_mount_info *cmount); /** * Extract the CephContext from the mount point handle. @@ -142,14 +142,14 @@ void ceph_shutdown(class ceph_mount_info *cmount); * @param cmount the ceph mount handle to get the context from. * @returns the CephContext associated with the mount handle. */ -class CephContext *ceph_get_mount_context(class ceph_mount_info *cmount); +struct CephContext *ceph_get_mount_context(struct ceph_mount_info *cmount); /* * Check mount status. * * Return non-zero value if mounted. Otherwise, zero. 
*/ -int ceph_is_mounted(class ceph_mount_info *cmount); +int ceph_is_mounted(struct ceph_mount_info *cmount); /** @} init */ @@ -167,7 +167,7 @@ int ceph_is_mounted(class ceph_mount_info *cmount); * @param path_list the configuration file path * @returns 0 on success, negative error code on failure */ -int ceph_conf_read_file(class ceph_mount_info *cmount, const char *path_list); +int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path_list); /** * Parse the command line arguments and load the configuration parameters. @@ -177,7 +177,7 @@ int ceph_conf_read_file(class ceph_mount_info *cmount, const char *path_list); * @param argv the argument list * @returns 0 on success, negative error code on failure */ -int ceph_conf_parse_argv(class ceph_mount_info *cmount, int argc, const char **argv); +int ceph_conf_parse_argv(struct ceph_mount_info *cmount, int argc, const char **argv); /** * Configure the cluster handle based on an environment variable @@ -194,7 +194,7 @@ int ceph_conf_parse_argv(class ceph_mount_info *cmount, int argc, const char **a * @param var name of the environment variable to read * @returns 0 on success, negative error code on failure */ -int ceph_conf_parse_env(class ceph_mount_info *cmount, const char *var); +int ceph_conf_parse_env(struct ceph_mount_info *cmount, const char *var); /** Sets a configuration value from a string. * @@ -204,7 +204,7 @@ int ceph_conf_parse_env(class ceph_mount_info *cmount, const char *var); * * @returns 0 on success, negative error code otherwise. */ -int ceph_conf_set(class ceph_mount_info *cmount, const char *option, const char *value); +int ceph_conf_set(struct ceph_mount_info *cmount, const char *option, const char *value); /** * Gets the configuration value as a string. @@ -215,7 +215,7 @@ int ceph_conf_set(class ceph_mount_info *cmount, const char *option, const char * @param len the length of the buffer. 
* @returns the size of the buffer filled in with the value, or negative error code on failure */ -int ceph_conf_get(class ceph_mount_info *cmount, const char *option, char *buf, size_t len); +int ceph_conf_get(struct ceph_mount_info *cmount, const char *option, char *buf, size_t len); /** @} config */ @@ -236,7 +236,7 @@ int ceph_conf_get(class ceph_mount_info *cmount, const char *option, char *buf, * @param stbuf the file system statistics filled in by this function. * @return 0 on success, negative error code otherwise. */ -int ceph_statfs(class ceph_mount_info *cmount, const char *path, struct statvfs *stbuf); +int ceph_statfs(struct ceph_mount_info *cmount, const char *path, struct statvfs *stbuf); /** * Synchronize all filesystem data to persistent media. @@ -244,7 +244,7 @@ int ceph_statfs(class ceph_mount_info *cmount, const char *path, struct statvfs * @param cmount the ceph mount handle to use for performing the sync_fs. * @returns 0 on success or negative error code on failure. */ -int ceph_sync_fs(class ceph_mount_info *cmount); +int ceph_sync_fs(struct ceph_mount_info *cmount); /** * Get the current working directory. @@ -252,7 +252,7 @@ int ceph_sync_fs(class ceph_mount_info *cmount); * @param cmount the ceph mount to get the current working directory for. * @returns the path to the current working directory */ -const char* ceph_getcwd(class ceph_mount_info *cmount); +const char* ceph_getcwd(struct ceph_mount_info *cmount); /** * Change the current working directory. @@ -261,7 +261,7 @@ const char* ceph_getcwd(class ceph_mount_info *cmount); * @param path the path to the working directory to change into. * @returns 0 on success, negative error code otherwise. */ -int ceph_chdir(class ceph_mount_info *cmount, const char *s); +int ceph_chdir(struct ceph_mount_info *cmount, const char *s); /** @} fsops */ @@ -281,7 +281,7 @@ int ceph_chdir(class ceph_mount_info *cmount, const char *s); * @param dirpp the directory result pointer structure to fill in. 
* @returns 0 on success or negative error code otherwise. */ -int ceph_opendir(class ceph_mount_info *cmount, const char *name, struct ceph_dir_result **dirpp); +int ceph_opendir(struct ceph_mount_info *cmount, const char *name, struct ceph_dir_result **dirpp); /** * Close the open directory. @@ -290,7 +290,7 @@ int ceph_opendir(class ceph_mount_info *cmount, const char *name, struct ceph_di * @param dirp the directory result pointer (set by ceph_opendir) to close * @returns 0 on success or negative error code on failure. */ -int ceph_closedir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp); +int ceph_closedir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp); /** * Get the next entry in an open directory. @@ -302,7 +302,7 @@ int ceph_closedir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp); * is empty. This pointer should not be freed by the caller, and is only safe to * access between return and the next call to ceph_readdir or ceph_closedir. */ -struct dirent * ceph_readdir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp); +struct dirent * ceph_readdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp); /** * A safe version of ceph_readdir, where the directory entry struct is allocated by the caller. @@ -314,7 +314,7 @@ struct dirent * ceph_readdir(class ceph_mount_info *cmount, struct ceph_dir_resu * @returns 1 if the next entry was filled in, 0 if the end of the directory stream was reached, * and a negative error code on failure. */ -int ceph_readdir_r(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de); +int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de); /** * A safe version of ceph_readdir that also returns the file statistics (readdir+stat). 
@@ -328,7 +328,7 @@ int ceph_readdir_r(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, * @returns 1 if the next entry was filled in, 0 if the end of the directory stream was reached, * and a negative error code on failure. */ -int ceph_readdirplus_r(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de, +int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de, struct stat *st, int *stmask); /** @@ -342,7 +342,7 @@ int ceph_readdirplus_r(class ceph_mount_info *cmount, struct ceph_dir_result *di * @returns the length of the buffer that was filled in, will always be multiples of sizeof(struct dirent), or a * negative error code. If the buffer is not large enough for a single entry, -ERANGE is returned. */ -int ceph_getdents(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *name, int buflen); +int ceph_getdents(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *name, int buflen); /** * Gets multiple directory names. @@ -355,7 +355,7 @@ int ceph_getdents(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, c * @returns the length of the buffer filled in with entry names, or a negative error code on failure. * If the buffer isn't large enough for a single entry, -ERANGE is returned. */ -int ceph_getdnames(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *name, int buflen); +int ceph_getdnames(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *name, int buflen); /** * Rewind the directory stream to the beginning of the directory. @@ -363,7 +363,7 @@ int ceph_getdnames(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, * @param cmount the ceph mount handle to use for performing the rewinddir. * @param dirp the directory stream pointer to rewind. 
*/ -void ceph_rewinddir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp); +void ceph_rewinddir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp); /** * Get the current position of a directory stream. @@ -374,7 +374,7 @@ void ceph_rewinddir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp) * by ceph_telldir do not have a particular order (cannot be compared with * inequality). */ -loff_t ceph_telldir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp); +loff_t ceph_telldir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp); /** * Move the directory stream to a position specified by the given offset. @@ -385,7 +385,7 @@ loff_t ceph_telldir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp) * a value returned by seekdir. Note that this value does not refer to the nth * entry in a directory, and can not be manipulated with plus or minus. */ -void ceph_seekdir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, loff_t offset); +void ceph_seekdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, loff_t offset); /** * Create a directory. @@ -396,7 +396,7 @@ void ceph_seekdir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, l * @param mode the permissions the directory should have once created. * @returns 0 on success or a negative return code on error. */ -int ceph_mkdir(class ceph_mount_info *cmount, const char *path, mode_t mode); +int ceph_mkdir(struct ceph_mount_info *cmount, const char *path, mode_t mode); /** * Create multiple directories at once. @@ -407,7 +407,7 @@ int ceph_mkdir(class ceph_mount_info *cmount, const char *path, mode_t mode); * @param mode the permissions the directory should have once created. * @returns 0 on success or a negative return code on error. */ -int ceph_mkdirs(class ceph_mount_info *cmount, const char *path, mode_t mode); +int ceph_mkdirs(struct ceph_mount_info *cmount, const char *path, mode_t mode); /** * Remove a directory. 
@@ -416,7 +416,7 @@ int ceph_mkdirs(class ceph_mount_info *cmount, const char *path, mode_t mode); * @param path the path of the directory to remove. * @returns 0 on success or a negative return code on error. */ -int ceph_rmdir(class ceph_mount_info *cmount, const char *path); +int ceph_rmdir(struct ceph_mount_info *cmount, const char *path); /** @} dir */ @@ -435,7 +435,7 @@ int ceph_rmdir(class ceph_mount_info *cmount, const char *path); * @param newname the path to the new file/directory to link from. * @returns 0 on success or a negative return code on error. */ -int ceph_link(class ceph_mount_info *cmount, const char *existing, const char *newname); +int ceph_link(struct ceph_mount_info *cmount, const char *existing, const char *newname); /** * Read a symbolic link. @@ -446,7 +446,7 @@ int ceph_link(class ceph_mount_info *cmount, const char *existing, const char *n * @param size the length of the buffer * @returns 0 on success or negative error code on failure */ -int ceph_readlink(class ceph_mount_info *cmount, const char *path, char *buf, loff_t size); +int ceph_readlink(struct ceph_mount_info *cmount, const char *path, char *buf, loff_t size); /** * Creates a symbolic link. @@ -456,7 +456,7 @@ int ceph_readlink(class ceph_mount_info *cmount, const char *path, char *buf, lo * @param newname the path to the new file/directory to link from. * @returns 0 on success or a negative return code on failure. */ -int ceph_symlink(class ceph_mount_info *cmount, const char *existing, const char *newname); +int ceph_symlink(struct ceph_mount_info *cmount, const char *existing, const char *newname); /** @} links */ @@ -475,7 +475,7 @@ int ceph_symlink(class ceph_mount_info *cmount, const char *existing, const char * @param path the path of the file or link to unlink. * @returns 0 on success or negative error code on failure. 
*/ -int ceph_unlink(class ceph_mount_info *cmount, const char *path); +int ceph_unlink(struct ceph_mount_info *cmount, const char *path); /** * Rename a file or directory. @@ -485,7 +485,7 @@ int ceph_unlink(class ceph_mount_info *cmount, const char *path); * @param to the new name of the file or directory * @returns 0 on success or negative error code on failure. */ -int ceph_rename(class ceph_mount_info *cmount, const char *from, const char *to); +int ceph_rename(struct ceph_mount_info *cmount, const char *from, const char *to); /** * Get a file's statistics and attributes. @@ -495,7 +495,7 @@ int ceph_rename(class ceph_mount_info *cmount, const char *from, const char *to) * @param stbuf the stat struct that will be filled in with the file's statistics. * @returns 0 on success or negative error code on failure. */ -int ceph_stat(class ceph_mount_info *cmount, const char *path, struct stat *stbuf); +int ceph_stat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf); /** * Get a file's statistics and attributes, without following symlinks. @@ -505,7 +505,7 @@ int ceph_stat(class ceph_mount_info *cmount, const char *path, struct stat *stbu * @param stbuf the stat struct that will be filled in with the file's statistics. * @returns 0 on success or negative error code on failure. */ -int ceph_lstat(class ceph_mount_info *cmount, const char *path, struct stat *stbuf); +int ceph_lstat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf); /** * Set a file's attributes. @@ -516,7 +516,7 @@ int ceph_lstat(class ceph_mount_info *cmount, const char *path, struct stat *stb * @param mask a mask of all the stat values that have been set on the stat struct. * @returns 0 on success or negative error code on failure. 
*/ -int ceph_setattr(class ceph_mount_info *cmount, const char *relpath, struct stat *attr, int mask); +int ceph_setattr(struct ceph_mount_info *cmount, const char *relpath, struct stat *attr, int mask); /** * Change the mode bits (permissions) of a file/directory. @@ -526,7 +526,7 @@ int ceph_setattr(class ceph_mount_info *cmount, const char *relpath, struct stat * @param mode the new permissions to set. * @returns 0 on success or a negative error code on failure. */ -int ceph_chmod(class ceph_mount_info *cmount, const char *path, mode_t mode); +int ceph_chmod(struct ceph_mount_info *cmount, const char *path, mode_t mode); /** * Change the mode bits (permissions) of an open file. @@ -536,7 +536,7 @@ int ceph_chmod(class ceph_mount_info *cmount, const char *path, mode_t mode); * @param mode the new permissions to set. * @returns 0 on success or a negative error code on failure. */ -int ceph_fchmod(class ceph_mount_info *cmount, int fd, mode_t mode); +int ceph_fchmod(struct ceph_mount_info *cmount, int fd, mode_t mode); /** * Change the ownership of a file/directory. @@ -547,7 +547,7 @@ int ceph_fchmod(class ceph_mount_info *cmount, int fd, mode_t mode); * @param gid the group id to set on the file/directory. * @returns 0 on success or negative error code on failure. */ -int ceph_chown(class ceph_mount_info *cmount, const char *path, int uid, int gid); +int ceph_chown(struct ceph_mount_info *cmount, const char *path, int uid, int gid); /** * Change the ownership of a file from an open file descriptor. @@ -558,7 +558,7 @@ int ceph_chown(class ceph_mount_info *cmount, const char *path, int uid, int gid * @param gid the group id to set on the file/directory. * @returns 0 on success or negative error code on failure. */ -int ceph_fchown(class ceph_mount_info *cmount, int fd, int uid, int gid); +int ceph_fchown(struct ceph_mount_info *cmount, int fd, int uid, int gid); /** * Change the ownership of a file/directory, don't follow symlinks. 
@@ -569,7 +569,7 @@ int ceph_fchown(class ceph_mount_info *cmount, int fd, int uid, int gid); * @param gid the group id to set on the file/directory. * @returns 0 on success or negative error code on failure. */ -int ceph_lchown(class ceph_mount_info *cmount, const char *path, int uid, int gid); +int ceph_lchown(struct ceph_mount_info *cmount, const char *path, int uid, int gid); /** * Change file/directory last access and modification times. @@ -579,7 +579,7 @@ int ceph_lchown(class ceph_mount_info *cmount, const char *path, int uid, int gi * @param buf holding the access and modification times to set on the file. * @returns 0 on success or negative error code on failure. */ -int ceph_utime(class ceph_mount_info *cmount, const char *path, struct utimbuf *buf); +int ceph_utime(struct ceph_mount_info *cmount, const char *path, struct utimbuf *buf); /** * Truncate the file to the given size. If this operation causes the @@ -590,7 +590,7 @@ int ceph_utime(class ceph_mount_info *cmount, const char *path, struct utimbuf * * @param size the new size of the file. * @returns 0 on success or a negative error code on failure. */ -int ceph_truncate(class ceph_mount_info *cmount, const char *path, loff_t size); +int ceph_truncate(struct ceph_mount_info *cmount, const char *path, loff_t size); /** * Make a block or character special file. @@ -604,7 +604,7 @@ int ceph_truncate(class ceph_mount_info *cmount, const char *path, loff_t size); * it is ignored. * @returns 0 on success or negative error code on failure. */ -int ceph_mknod(class ceph_mount_info *cmount, const char *path, mode_t mode, dev_t rdev); +int ceph_mknod(struct ceph_mount_info *cmount, const char *path, mode_t mode, dev_t rdev); /** * Create and/or open a file. * @@ -616,7 +616,7 @@ int ceph_mknod(class ceph_mount_info *cmount, const char *path, mode_t mode, dev * is specified in the flags. * @returns a non-negative file descriptor number on success or a negative error code on failure. 
*/ -int ceph_open(class ceph_mount_info *cmount, const char *path, int flags, mode_t mode); +int ceph_open(struct ceph_mount_info *cmount, const char *path, int flags, mode_t mode); /** * Create and/or open a file with a specific file layout. @@ -633,7 +633,7 @@ int ceph_open(class ceph_mount_info *cmount, const char *path, int flags, mode_t * @param data_pool name of target data pool name (optional, NULL or empty string for default) * @returns a non-negative file descriptor number on success or a negative error code on failure. */ -int ceph_open_layout(class ceph_mount_info *cmount, const char *path, int flags, +int ceph_open_layout(struct ceph_mount_info *cmount, const char *path, int flags, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool); @@ -644,7 +644,7 @@ int ceph_open_layout(class ceph_mount_info *cmount, const char *path, int flags, * @param fd the file descriptor referring to the open file. * @returns 0 on success or a negative error code on failure. */ -int ceph_close(class ceph_mount_info *cmount, int fd); +int ceph_close(struct ceph_mount_info *cmount, int fd); /** * Reposition the open file stream based on the given offset. @@ -659,7 +659,7 @@ int ceph_close(class ceph_mount_info *cmount, int fd); * SEEK_END: the offset is set to the end of the file plus @ref offset bytes. * @returns 0 on success or a negative error code on failure. */ -loff_t ceph_lseek(class ceph_mount_info *cmount, int fd, loff_t offset, int whence); +loff_t ceph_lseek(struct ceph_mount_info *cmount, int fd, loff_t offset, int whence); /** * Read data from the file. * @@ -671,7 +671,7 @@ loff_t ceph_lseek(class ceph_mount_info *cmount, int fd, loff_t offset, int when * function reads from the current offset of the file descriptor. * @returns the number of bytes read into buf, or a negative error code on failure. 
*/ -int ceph_read(class ceph_mount_info *cmount, int fd, char *buf, loff_t size, loff_t offset); +int ceph_read(struct ceph_mount_info *cmount, int fd, char *buf, loff_t size, loff_t offset); /** * Write data to a file. @@ -684,7 +684,7 @@ int ceph_read(class ceph_mount_info *cmount, int fd, char *buf, loff_t size, lof * function writes to the current offset of the file descriptor. * @returns the number of bytes written, or a negative error code */ -int ceph_write(class ceph_mount_info *cmount, int fd, const char *buf, loff_t size, +int ceph_write(struct ceph_mount_info *cmount, int fd, const char *buf, loff_t size, loff_t offset); /** @@ -695,7 +695,7 @@ int ceph_write(class ceph_mount_info *cmount, int fd, const char *buf, loff_t si * @param size the new size of the file * @returns 0 on success or a negative error code on failure. */ -int ceph_ftruncate(class ceph_mount_info *cmount, int fd, loff_t size); +int ceph_ftruncate(struct ceph_mount_info *cmount, int fd, loff_t size); /** * Synchronize an open file to persistent media. @@ -706,7 +706,7 @@ int ceph_ftruncate(class ceph_mount_info *cmount, int fd, loff_t size); * or just data (1). * @return 0 on success or a negative error code on failure. */ -int ceph_fsync(class ceph_mount_info *cmount, int fd, int syncdataonly); +int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly); /** * Preallocate or release disk space for the file for the byte range. @@ -735,7 +735,7 @@ int ceph_fallocate(struct ceph_mount_info *cmount, int fd, int mode, * function. * @returns 0 on success or a negative error code on failure */ -int ceph_fstat(class ceph_mount_info *cmount, int fd, struct stat *stbuf); +int ceph_fstat(struct ceph_mount_info *cmount, int fd, struct stat *stbuf); /** @} file */ @@ -756,7 +756,7 @@ int ceph_fstat(class ceph_mount_info *cmount, int fd, struct stat *stbuf); * @param size the size of the pre-allocated buffer * @returns the size of the value or a negative error code on failure. 
*/ -int ceph_getxattr(class ceph_mount_info *cmount, const char *path, const char *name, +int ceph_getxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size); /** @@ -772,7 +772,7 @@ int ceph_getxattr(class ceph_mount_info *cmount, const char *path, const char *n * @param size the size of the pre-allocated buffer * @returns the size of the value or a negative error code on failure. */ -int ceph_lgetxattr(class ceph_mount_info *cmount, const char *path, const char *name, +int ceph_lgetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size); /** @@ -784,7 +784,7 @@ int ceph_lgetxattr(class ceph_mount_info *cmount, const char *path, const char * * @param size the size of the list buffer. * @returns the size of the resulting list filled in. */ -int ceph_listxattr(class ceph_mount_info *cmount, const char *path, char *list, size_t size); +int ceph_listxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size); /** * Get the list of extended attribute keys on a file, but do not follow symbolic links. @@ -795,7 +795,7 @@ int ceph_listxattr(class ceph_mount_info *cmount, const char *path, char *list, * @param size the size of the list buffer. * @returns the size of the resulting list filled in. */ -int ceph_llistxattr(class ceph_mount_info *cmount, const char *path, char *list, size_t size); +int ceph_llistxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size); /** * Remove an extended attribute from a file. @@ -805,7 +805,7 @@ int ceph_llistxattr(class ceph_mount_info *cmount, const char *path, char *list, * @param name the name of the extended attribute to remove. * @returns 0 on success or a negative error code on failure. 
*/ -int ceph_removexattr(class ceph_mount_info *cmount, const char *path, const char *name); +int ceph_removexattr(struct ceph_mount_info *cmount, const char *path, const char *name); /** * Remove the extended attribute from a file, do not follow symbolic links. @@ -815,7 +815,7 @@ int ceph_removexattr(class ceph_mount_info *cmount, const char *path, const char * @param name the name of the extended attribute to remove. * @returns 0 on success or a negative error code on failure. */ -int ceph_lremovexattr(class ceph_mount_info *cmount, const char *path, const char *name); +int ceph_lremovexattr(struct ceph_mount_info *cmount, const char *path, const char *name); /** * Set an extended attribute on a file. @@ -830,7 +830,7 @@ int ceph_lremovexattr(class ceph_mount_info *cmount, const char *path, const cha * CEPH_XATTR_REPLACE: replace the extended attribute, Must already exist. * @returns 0 on success or a negative error code on failure. */ -int ceph_setxattr(class ceph_mount_info *cmount, const char *path, const char *name, +int ceph_setxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags); /** @@ -846,7 +846,7 @@ int ceph_setxattr(class ceph_mount_info *cmount, const char *path, const char *n * CEPH_XATTR_REPLACE: replace the extended attribute, Must already exist. * @returns 0 on success or a negative error code on failure. */ -int ceph_lsetxattr(class ceph_mount_info *cmount, const char *path, const char *name, +int ceph_lsetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags); /** @} xattr */ @@ -865,7 +865,7 @@ int ceph_lsetxattr(class ceph_mount_info *cmount, const char *path, const char * * @param fh the open file descriptor referring to the file to get the striping unit of. * @returns the striping unit of the file or a negative error code on failure. 
*/ -int ceph_get_file_stripe_unit(class ceph_mount_info *cmount, int fh); +int ceph_get_file_stripe_unit(struct ceph_mount_info *cmount, int fh); /** * Get the file striping unit. @@ -874,7 +874,7 @@ int ceph_get_file_stripe_unit(class ceph_mount_info *cmount, int fh); * @param path the path of the file/directory get the striping unit of. * @returns the striping unit of the file or a negative error code on failure. */ -int ceph_get_path_stripe_unit(class ceph_mount_info *cmount, const char *path); +int ceph_get_path_stripe_unit(struct ceph_mount_info *cmount, const char *path); /** * Get the file striping count from an open file descriptor. @@ -883,7 +883,7 @@ int ceph_get_path_stripe_unit(class ceph_mount_info *cmount, const char *path); * @param fh the open file descriptor referring to the file to get the striping count of. * @returns the striping count of the file or a negative error code on failure. */ -int ceph_get_file_stripe_count(class ceph_mount_info *cmount, int fh); +int ceph_get_file_stripe_count(struct ceph_mount_info *cmount, int fh); /** * Get the file striping count. @@ -892,7 +892,7 @@ int ceph_get_file_stripe_count(class ceph_mount_info *cmount, int fh); * @param path the path of the file/directory get the striping count of. * @returns the striping count of the file or a negative error code on failure. */ -int ceph_get_path_stripe_count(class ceph_mount_info *cmount, const char *path); +int ceph_get_path_stripe_count(struct ceph_mount_info *cmount, const char *path); /** * Get the file object size from an open file descriptor. @@ -901,7 +901,7 @@ int ceph_get_path_stripe_count(class ceph_mount_info *cmount, const char *path); * @param fh the open file descriptor referring to the file to get the object size of. * @returns the object size of the file or a negative error code on failure. 
*/ -int ceph_get_file_object_size(class ceph_mount_info *cmount, int fh); +int ceph_get_file_object_size(struct ceph_mount_info *cmount, int fh); /** * Get the file object size. @@ -910,7 +910,7 @@ int ceph_get_file_object_size(class ceph_mount_info *cmount, int fh); * @param path the path of the file/directory get the object size of. * @returns the object size of the file or a negative error code on failure. */ -int ceph_get_path_object_size(class ceph_mount_info *cmount, const char *path); +int ceph_get_path_object_size(struct ceph_mount_info *cmount, const char *path); /** * Get the file pool information from an open file descriptor. @@ -919,7 +919,7 @@ int ceph_get_path_object_size(class ceph_mount_info *cmount, const char *path); * @param fh the open file descriptor referring to the file to get the pool information of. * @returns the ceph pool id that the file is in */ -int ceph_get_file_pool(class ceph_mount_info *cmount, int fh); +int ceph_get_file_pool(struct ceph_mount_info *cmount, int fh); /** * Get the file pool information. @@ -928,7 +928,7 @@ int ceph_get_file_pool(class ceph_mount_info *cmount, int fh); * @param path the path of the file/directory get the pool information of. * @returns the ceph pool id that the file is in */ -int ceph_get_path_pool(class ceph_mount_info *cmount, const char *path); +int ceph_get_path_pool(struct ceph_mount_info *cmount, const char *path); /** * Get the name of the pool a opened file is stored in, @@ -942,7 +942,7 @@ int ceph_get_path_pool(class ceph_mount_info *cmount, const char *path); * @param buflen size of the buffer * @returns length in bytes of the pool name, or -ERANGE if the buffer is not large enough. 
*/ -int ceph_get_file_pool_name(class ceph_mount_info *cmount, int fh, char *buf, size_t buflen); +int ceph_get_file_pool_name(struct ceph_mount_info *cmount, int fh, char *buf, size_t buflen); /** * get the name of a pool by id @@ -955,7 +955,7 @@ int ceph_get_file_pool_name(class ceph_mount_info *cmount, int fh, char *buf, si * @param buflen size of the buffer * @returns length in bytes of the pool name, or -ERANGE if the buffer is not large enough */ -int ceph_get_pool_name(class ceph_mount_info *cmount, int pool, char *buf, size_t buflen); +int ceph_get_pool_name(struct ceph_mount_info *cmount, int pool, char *buf, size_t buflen); /** * Get the name of the pool a file is stored in @@ -969,7 +969,7 @@ int ceph_get_pool_name(class ceph_mount_info *cmount, int pool, char *buf, size_ * @param buflen size of the buffer * @returns length in bytes of the pool name, or -ERANGE if the buffer is not large enough. */ -int ceph_get_path_pool_name(class ceph_mount_info *cmount, const char *path, char *buf, size_t buflen); +int ceph_get_path_pool_name(struct ceph_mount_info *cmount, const char *path, char *buf, size_t buflen); /** * Get the file layout from an open file descriptor. @@ -982,7 +982,7 @@ int ceph_get_path_pool_name(class ceph_mount_info *cmount, const char *path, cha * @param pg_pool where to store the ceph pool id that the file is in * @returns 0 on success or a negative error code on failure. */ -int ceph_get_file_layout(class ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool); +int ceph_get_file_layout(struct ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool); /** * Get the file layout. @@ -995,7 +995,7 @@ int ceph_get_file_layout(class ceph_mount_info *cmount, int fh, int *stripe_unit * @param pg_pool where to store the ceph pool id that the file is in * @returns 0 on success or a negative error code on failure. 
*/ -int ceph_get_path_layout(class ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool); +int ceph_get_path_layout(struct ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool); /** * Get the file replication information from an open file descriptor. @@ -1004,7 +1004,7 @@ int ceph_get_path_layout(class ceph_mount_info *cmount, const char *path, int *s * @param fh the open file descriptor referring to the file to get the replication information of. * @returns the replication factor of the file. */ -int ceph_get_file_replication(class ceph_mount_info *cmount, int fh); +int ceph_get_file_replication(struct ceph_mount_info *cmount, int fh); /** * Get the file replication information. @@ -1013,7 +1013,7 @@ int ceph_get_file_replication(class ceph_mount_info *cmount, int fh); * @param path the path of the file/directory get the replication information of. * @returns the replication factor of the file. */ -int ceph_get_path_replication(class ceph_mount_info *cmount, const char *path); +int ceph_get_path_replication(struct ceph_mount_info *cmount, const char *path); /** * Get the id of the named pool. @@ -1022,7 +1022,7 @@ int ceph_get_path_replication(class ceph_mount_info *cmount, const char *path); * @param pool_name the name of the pool. * @returns the pool id, or a negative error code on failure. */ -int ceph_get_pool_id(class ceph_mount_info *cmount, const char *pool_name); +int ceph_get_pool_id(struct ceph_mount_info *cmount, const char *pool_name); /** * Get the pool replication factor. @@ -1031,7 +1031,7 @@ int ceph_get_pool_id(class ceph_mount_info *cmount, const char *pool_name); * @param pool_id the pool id to look up * @returns the replication factor, or a negative error code on failure. 
*/ -int ceph_get_pool_replication(class ceph_mount_info *cmount, int pool_id); +int ceph_get_pool_replication(struct ceph_mount_info *cmount, int pool_id); /** * Get the OSD address where the primary copy of a file stripe is located. @@ -1045,7 +1045,7 @@ int ceph_get_pool_replication(class ceph_mount_info *cmount, int pool_id); * @returns the size of the addressed filled into the @ref addr parameter, or a negative * error code on failure. */ -int ceph_get_file_stripe_address(class ceph_mount_info *cmount, int fd, loff_t offset, +int ceph_get_file_stripe_address(struct ceph_mount_info *cmount, int fd, loff_t offset, struct sockaddr_storage *addr, int naddr); /** @@ -1061,7 +1061,7 @@ int ceph_get_file_stripe_address(class ceph_mount_info *cmount, int fd, loff_t o * @returns the number of items stored in the output array, or -ERANGE if the * array is not large enough. */ -int ceph_get_file_extent_osds(class ceph_mount_info *cmount, int fh, +int ceph_get_file_extent_osds(struct ceph_mount_info *cmount, int fh, loff_t offset, loff_t *length, int *osds, int nosds); /** @@ -1078,7 +1078,7 @@ int ceph_get_file_extent_osds(class ceph_mount_info *cmount, int fh, * @returns the amount of bytes written into the buffer, or -ERANGE if the * array is not large enough. */ -int ceph_get_osd_crush_location(class ceph_mount_info *cmount, +int ceph_get_osd_crush_location(struct ceph_mount_info *cmount, int osd, char *path, size_t len); /** @@ -1089,7 +1089,7 @@ int ceph_get_osd_crush_location(class ceph_mount_info *cmount, * @param addr the OSD network address. * @returns zero on success, other returns a negative error code. */ -int ceph_get_osd_addr(class ceph_mount_info *cmount, int osd, +int ceph_get_osd_addr(struct ceph_mount_info *cmount, int osd, struct sockaddr_storage *addr); /** @@ -1097,7 +1097,7 @@ int ceph_get_osd_addr(class ceph_mount_info *cmount, int osd, * @param cmount the ceph mount handle. * @returns the stripe unit granularity or a negative error code on failure. 
*/ -int ceph_get_stripe_unit_granularity(class ceph_mount_info *cmount); +int ceph_get_stripe_unit_granularity(struct ceph_mount_info *cmount); /** @} filelayout */ @@ -1105,11 +1105,11 @@ int ceph_get_stripe_unit_granularity(class ceph_mount_info *cmount); * No longer available. Do not use. * These functions will return -EOPNOTSUPP. */ -int ceph_set_default_file_stripe_unit(class ceph_mount_info *cmount, int stripe); -int ceph_set_default_file_stripe_count(class ceph_mount_info *cmount, int count); -int ceph_set_default_object_size(class ceph_mount_info *cmount, int size); -int ceph_set_default_preferred_pg(class ceph_mount_info *cmount, int osd); -int ceph_set_default_file_replication(class ceph_mount_info *cmount, int replication); +int ceph_set_default_file_stripe_unit(struct ceph_mount_info *cmount, int stripe); +int ceph_set_default_file_stripe_count(struct ceph_mount_info *cmount, int count); +int ceph_set_default_object_size(struct ceph_mount_info *cmount, int size); +int ceph_set_default_preferred_pg(struct ceph_mount_info *cmount, int osd); +int ceph_set_default_file_replication(struct ceph_mount_info *cmount, int replication); /** * Read from local replicas when possible. @@ -1119,7 +1119,7 @@ int ceph_set_default_file_replication(class ceph_mount_info *cmount, int replica * for reads. * @returns 0 */ -int ceph_localize_reads(class ceph_mount_info *cmount, int val); +int ceph_localize_reads(struct ceph_mount_info *cmount, int val); /** * Get the osd id of the local osd (if any) @@ -1128,7 +1128,7 @@ int ceph_localize_reads(class ceph_mount_info *cmount, int val); * @returns the osd (if any) local to the node where this call is made, otherwise * -1 is returned. 
*/ -int ceph_get_local_osd(class ceph_mount_info *cmount); +int ceph_get_local_osd(struct ceph_mount_info *cmount); /** @} default_filelayout */ @@ -1140,7 +1140,7 @@ int ceph_get_local_osd(class ceph_mount_info *cmount); * @returns the current capabilities issued to this client * for the open file */ -int ceph_debug_get_fd_caps(class ceph_mount_info *cmount, int fd); +int ceph_debug_get_fd_caps(struct ceph_mount_info *cmount, int fd); /** * Get the capabilities currently issued to the client. @@ -1150,7 +1150,7 @@ int ceph_debug_get_fd_caps(class ceph_mount_info *cmount, int fd); * @returns the current capabilities issued to this client * for the file */ -int ceph_debug_get_file_caps(class ceph_mount_info *cmount, const char *path); +int ceph_debug_get_file_caps(struct ceph_mount_info *cmount, const char *path); #ifdef __cplusplus } diff --git a/src/include/stat.h b/src/include/stat.h new file mode 100644 index 0000000000000..19398758e210d --- /dev/null +++ b/src/include/stat.h @@ -0,0 +1,145 @@ +#ifndef CEPH_STAT_H +#define CEPH_STAT_H + +#include + +#include + +/* + * Access time-related `struct stat` members. + * + * Note that for each of the stat member get/set functions below, setting a + * high-res value (stat_set_*_nsec) on a platform without high-res support is + * a no-op. 
+ */ + +#ifdef HAVE_STAT_ST_MTIM_TV_NSEC + +static inline uint32_t stat_get_mtime_nsec(struct stat *st) +{ + return st->st_mtim.tv_nsec; +} + +static inline void stat_set_mtime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_mtim.tv_nsec = nsec; +} + +static inline uint32_t stat_get_atime_nsec(struct stat *st) +{ + return st->st_atim.tv_nsec; +} + +static inline void stat_set_atime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_atim.tv_nsec = nsec; +} + +static inline uint32_t stat_get_ctime_nsec(struct stat *st) +{ + return st->st_ctim.tv_nsec; +} + +static inline void stat_set_ctime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_ctim.tv_nsec = nsec; +} + +#elif defined(HAVE_STAT_ST_MTIMESPEC_TV_NSEC) + +static inline uint32_t stat_get_mtime_nsec(struct stat *st) +{ + return st->st_mtimespec.tv_nsec; +} + +static inline void stat_set_mtime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_mtimespec.tv_nsec = nsec; +} + +static inline uint32_t stat_get_atime_nsec(struct stat *st) +{ + return st->st_atimespec.tv_nsec; +} + +static inline void stat_set_atime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_atimespec.tv_nsec = nsec; +} + +static inline uint32_t stat_get_ctime_nsec(struct stat *st) +{ + return st->st_ctimespec.tv_nsec; +} + +static inline void stat_set_ctime_nsec(struct stat *st, uint32_t nsec) +{ + st->st_ctimespec.tv_nsec = nsec; +} + +#else + +static inline uint32_t stat_get_mtime_nsec(struct stat *st) +{ + return 0; +} + +static inline void stat_set_mtime_nsec(struct stat *st, uint32_t nsec) +{ +} + +static inline uint32_t stat_get_atime_nsec(struct stat *st) +{ + return 0; +} + +static inline void stat_set_atime_nsec(struct stat *st, uint32_t nsec) +{ +} + +static inline uint32_t stat_get_ctime_nsec(struct stat *st) +{ + return 0; +} + +static inline void stat_set_ctime_nsec(struct stat *st, uint32_t nsec) +{ +} + +#endif + +/* + * Access second-resolution `struct stat` members. 
+ */ + +static inline uint32_t stat_get_mtime_sec(struct stat *st) +{ + return st->st_mtime; +} + +static inline void stat_set_mtime_sec(struct stat *st, uint32_t sec) +{ + st->st_mtime = sec; +} + +static inline uint32_t stat_get_atime_sec(struct stat *st) +{ + return st->st_atime; +} + +static inline void stat_set_atime_sec(struct stat *st, uint32_t sec) +{ + st->st_atime = sec; +} + +static inline uint32_t stat_get_ctime_sec(struct stat *st) +{ + return st->st_ctime; +} + +static inline void stat_set_ctime_sec(struct stat *st, uint32_t sec) +{ + st->st_ctime = sec; +} + +#endif diff --git a/src/include/utime.h b/src/include/utime.h index e13460593062b..5bebc70a3422c 100644 --- a/src/include/utime.h +++ b/src/include/utime.h @@ -138,8 +138,9 @@ class utime_t { } void sleep() { - struct timespec ts = { (__time_t)tv.tv_sec, (long)tv.tv_nsec }; - nanosleep(&ts, &ts); + struct timespec ts; + to_timespec(&ts); + nanosleep(&ts, NULL); } // output diff --git a/src/init-radosgw b/src/init-radosgw index 1468c4bcdfd90..caa05cb5cc4d1 100644 --- a/src/init-radosgw +++ b/src/init-radosgw @@ -12,6 +12,24 @@ PATH=/sbin:/bin:/usr/bin . /lib/lsb/init-functions +daemon_is_running() { + daemon=$1 + if pidof $daemon >/dev/null; then + echo "$daemon is running." + exit 0 + else + echo "$daemon is not running." + exit 1 + fi +} + +VERBOSE=0 +for opt in $*; do + if [ "$opt" = "-v" ] || [ "$opt" = "--verbose" ]; then + VERBOSE=1 + fi +done + # prefix for radosgw instances in ceph.conf PREFIX='client.radosgw.' @@ -20,7 +38,8 @@ DEFAULT_USER='www-data' RADOSGW=`which radosgw` if [ ! -x "$RADOSGW" ]; then - exit 0 + [ $VERBOSE -eq 1 ] && echo "$RADOSGW could not start, it is not executable." + exit 1 fi case "$1" in @@ -40,7 +59,9 @@ case "$1" in # mapped to this host? 
host=`ceph-conf -n $name host` - if [ "$host" != `hostname` ]; then + hostname=`hostname -s` + if [ "$host" != "$hostname" ]; then + [ $VERBOSE -eq 1 ] && echo "hostname $hostname could not be found in ceph.conf:[$name], not starting." continue fi @@ -58,6 +79,7 @@ case "$1" in echo "Starting $name..." start-stop-daemon --start -u $user -x $RADOSGW -- -n $name done + daemon_is_running $RADOSGW ;; reload) echo "Reloading $name..." @@ -71,15 +93,10 @@ case "$1" in start-stop-daemon --stop -x $RADOSGW --oknodo ;; status) - if pidof $RADOSGW >/dev/null; then - echo "$RADOSGW is running." - else - echo "$RADOSGW is not running." - exit 1 - fi + daemon_is_running $RADOSGW ;; *) - echo "Usage: $0 start|stop|restart|force-reload|reload|status" >&2 + echo "Usage: $0 {start|stop|restart|force-reload|reload|status} [-v|--verbose]" >&2 exit 3 ;; esac diff --git a/src/init-radosgw.sysv b/src/init-radosgw.sysv index cba99aff18f34..869cc18e7dae4 100644 --- a/src/init-radosgw.sysv +++ b/src/init-radosgw.sysv @@ -13,6 +13,24 @@ PATH=/sbin:/bin:/usr/bin #. /lib/lsb/init-functions . /etc/rc.d/init.d/functions +daemon_is_running() { + daemon=$1 + if pidof $daemon >/dev/null; then + echo "$daemon is running." + exit 0 + else + echo "$daemon is not running." + exit 1 + fi +} + +VERBOSE=0 +for opt in $*; do + if [ "$opt" = "-v" ] || [ "$opt" = "--verbose" ]; then + VERBOSE=1 + fi +done + # prefix for radosgw instances in ceph.conf PREFIX='client.radosgw.' @@ -22,7 +40,8 @@ DEFAULT_USER='apache' RADOSGW=`which radosgw` if [ ! -x "$RADOSGW" ]; then - exit 0 + [ $VERBOSE -eq 1 ] && echo "$RADOSGW could not start, it is not executable." + exit 1 fi case "$1" in @@ -43,7 +62,9 @@ case "$1" in # mapped to this host? host=`ceph-conf -n $name host` - if [ "$host" != `hostname` ]; then + hostname=`hostname -s` + if [ "$host" != "$hostname" ]; then + [ $VERBOSE -eq 1 ] && echo "hostname $hostname could not be found in ceph.conf:[$name], not starting." 
continue fi @@ -62,6 +83,7 @@ case "$1" in daemon --user="$user" "$RADOSGW -n $name" echo "Starting $name..." done + daemon_is_running $RADOSGW ;; reload) #start-stop-daemon --signal HUP -x $RADOSGW --oknodo @@ -78,15 +100,10 @@ case "$1" in echo "Stopping radosgw instance(s)..." ;; status) - if pidof $RADOSGW >/dev/null; then - echo "$RADOSGW is running." - else - echo "$RADOSGW is not running." - exit 1 - fi + daemon_is_running $RADOSGW ;; *) - echo "Usage: $0 start|stop|restart|force-reload|reload|status" >&2 + echo "Usage: $0 {start|stop|restart|force-reload|reload|status} [-v|--verbose]" >&2 exit 3 ;; esac diff --git a/src/libcephfs.cc b/src/libcephfs.cc index cdd06f461e6a3..86f9763478122 100644 --- a/src/libcephfs.cc +++ b/src/libcephfs.cc @@ -31,7 +31,7 @@ #include "msg/Messenger.h" #include "include/assert.h" -class ceph_mount_info +struct ceph_mount_info { public: ceph_mount_info(uint64_t msgr_nonce_, CephContext *cct_) @@ -240,7 +240,7 @@ extern "C" const char *ceph_version(int *pmajor, int *pminor, int *ppatch) return VERSION; } -extern "C" int ceph_create_with_context(class ceph_mount_info **cmount, CephContext *cct) +extern "C" int ceph_create_with_context(struct ceph_mount_info **cmount, CephContext *cct) { uint64_t nonce = 0; @@ -249,11 +249,11 @@ extern "C" int ceph_create_with_context(class ceph_mount_info **cmount, CephCont nonce &= ~0xffff; nonce |= (uint64_t)getpid(); - *cmount = new class ceph_mount_info(nonce, cct); + *cmount = new struct ceph_mount_info(nonce, cct); return 0; } -extern "C" int ceph_create(class ceph_mount_info **cmount, const char * const id) +extern "C" int ceph_create(struct ceph_mount_info **cmount, const char * const id) { CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT); if (id) { @@ -266,12 +266,12 @@ extern "C" int ceph_create(class ceph_mount_info **cmount, const char * const id return ceph_create_with_context(cmount, cct); } -extern "C" int ceph_unmount(class ceph_mount_info *cmount) +extern "C" int 
ceph_unmount(struct ceph_mount_info *cmount) { return cmount->unmount(); } -extern "C" int ceph_release(class ceph_mount_info *cmount) +extern "C" int ceph_release(struct ceph_mount_info *cmount) { if (cmount->is_mounted()) return -EISCONN; @@ -279,35 +279,35 @@ extern "C" int ceph_release(class ceph_mount_info *cmount) return 0; } -extern "C" void ceph_shutdown(class ceph_mount_info *cmount) +extern "C" void ceph_shutdown(struct ceph_mount_info *cmount) { cmount->shutdown(); delete cmount; } -extern "C" int ceph_conf_read_file(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path) { return cmount->conf_read_file(path); } -extern "C" int ceph_conf_parse_argv(class ceph_mount_info *cmount, int argc, +extern "C" int ceph_conf_parse_argv(struct ceph_mount_info *cmount, int argc, const char **argv) { return cmount->conf_parse_argv(argc, argv); } -extern "C" int ceph_conf_parse_env(class ceph_mount_info *cmount, const char *name) +extern "C" int ceph_conf_parse_env(struct ceph_mount_info *cmount, const char *name) { return cmount->conf_parse_env(name); } -extern "C" int ceph_conf_set(class ceph_mount_info *cmount, const char *option, +extern "C" int ceph_conf_set(struct ceph_mount_info *cmount, const char *option, const char *value) { return cmount->conf_set(option, value); } -extern "C" int ceph_conf_get(class ceph_mount_info *cmount, const char *option, +extern "C" int ceph_conf_get(struct ceph_mount_info *cmount, const char *option, char *buf, size_t len) { if (buf == NULL) { @@ -316,7 +316,7 @@ extern "C" int ceph_conf_get(class ceph_mount_info *cmount, const char *option, return cmount->conf_get(option, buf, len); } -extern "C" int ceph_mount(class ceph_mount_info *cmount, const char *root) +extern "C" int ceph_mount(struct ceph_mount_info *cmount, const char *root) { std::string mount_root; if (root) @@ -324,12 +324,12 @@ extern "C" int ceph_mount(class ceph_mount_info *cmount, const 
char *root) return cmount->mount(mount_root); } -extern "C" int ceph_is_mounted(class ceph_mount_info *cmount) +extern "C" int ceph_is_mounted(struct ceph_mount_info *cmount) { return cmount->is_mounted() ? 1 : 0; } -extern "C" int ceph_statfs(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_statfs(struct ceph_mount_info *cmount, const char *path, struct statvfs *stbuf) { if (!cmount->is_mounted()) @@ -337,26 +337,26 @@ extern "C" int ceph_statfs(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->statfs(path, stbuf); } -extern "C" int ceph_get_local_osd(class ceph_mount_info *cmount) +extern "C" int ceph_get_local_osd(struct ceph_mount_info *cmount) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->get_local_osd(); } -extern "C" const char* ceph_getcwd(class ceph_mount_info *cmount) +extern "C" const char* ceph_getcwd(struct ceph_mount_info *cmount) { return cmount->get_cwd(); } -extern "C" int ceph_chdir (class ceph_mount_info *cmount, const char *s) +extern "C" int ceph_chdir (struct ceph_mount_info *cmount, const char *s) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->chdir(s); } -extern "C" int ceph_opendir(class ceph_mount_info *cmount, +extern "C" int ceph_opendir(struct ceph_mount_info *cmount, const char *name, struct ceph_dir_result **dirpp) { if (!cmount->is_mounted()) @@ -364,14 +364,14 @@ extern "C" int ceph_opendir(class ceph_mount_info *cmount, return cmount->get_client()->opendir(name, (dir_result_t **)dirpp); } -extern "C" int ceph_closedir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp) +extern "C" int ceph_closedir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->closedir((dir_result_t*)dirp); } -extern "C" struct dirent * ceph_readdir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp) +extern "C" struct dirent * ceph_readdir(struct 
ceph_mount_info *cmount, struct ceph_dir_result *dirp) { if (!cmount->is_mounted()) { /* Client::readdir also sets errno to signal errors. */ @@ -381,14 +381,14 @@ extern "C" struct dirent * ceph_readdir(class ceph_mount_info *cmount, struct ce return cmount->get_client()->readdir((dir_result_t*)dirp); } -extern "C" int ceph_readdir_r(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de) +extern "C" int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->readdir_r((dir_result_t*)dirp, de); } -extern "C" int ceph_readdirplus_r(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, +extern "C" int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de, struct stat *st, int *stmask) { if (!cmount->is_mounted()) @@ -396,7 +396,7 @@ extern "C" int ceph_readdirplus_r(class ceph_mount_info *cmount, struct ceph_dir return cmount->get_client()->readdirplus_r((dir_result_t*)dirp, de, st, stmask); } -extern "C" int ceph_getdents(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, +extern "C" int ceph_getdents(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *buf, int buflen) { if (!cmount->is_mounted()) @@ -404,7 +404,7 @@ extern "C" int ceph_getdents(class ceph_mount_info *cmount, struct ceph_dir_resu return cmount->get_client()->getdents((dir_result_t*)dirp, buf, buflen); } -extern "C" int ceph_getdnames(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, +extern "C" int ceph_getdnames(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *buf, int buflen) { if (!cmount->is_mounted()) @@ -412,28 +412,28 @@ extern "C" int ceph_getdnames(class ceph_mount_info *cmount, struct ceph_dir_res return cmount->get_client()->getdnames((dir_result_t*)dirp, buf, buflen); } -extern "C" void ceph_rewinddir(class ceph_mount_info *cmount, struct 
ceph_dir_result *dirp) +extern "C" void ceph_rewinddir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp) { if (!cmount->is_mounted()) return; cmount->get_client()->rewinddir((dir_result_t*)dirp); } -extern "C" loff_t ceph_telldir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp) +extern "C" loff_t ceph_telldir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->telldir((dir_result_t*)dirp); } -extern "C" void ceph_seekdir(class ceph_mount_info *cmount, struct ceph_dir_result *dirp, loff_t offset) +extern "C" void ceph_seekdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, loff_t offset) { if (!cmount->is_mounted()) return; cmount->get_client()->seekdir((dir_result_t*)dirp, offset); } -extern "C" int ceph_link (class ceph_mount_info *cmount, const char *existing, +extern "C" int ceph_link (struct ceph_mount_info *cmount, const char *existing, const char *newname) { if (!cmount->is_mounted()) @@ -441,14 +441,14 @@ extern "C" int ceph_link (class ceph_mount_info *cmount, const char *existing, return cmount->get_client()->link(existing, newname); } -extern "C" int ceph_unlink(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_unlink(struct ceph_mount_info *cmount, const char *path) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->unlink(path); } -extern "C" int ceph_rename(class ceph_mount_info *cmount, const char *from, +extern "C" int ceph_rename(struct ceph_mount_info *cmount, const char *from, const char *to) { if (!cmount->is_mounted()) @@ -457,21 +457,21 @@ extern "C" int ceph_rename(class ceph_mount_info *cmount, const char *from, } // dirs -extern "C" int ceph_mkdir(class ceph_mount_info *cmount, const char *path, mode_t mode) +extern "C" int ceph_mkdir(struct ceph_mount_info *cmount, const char *path, mode_t mode) { if (!cmount->is_mounted()) return -ENOTCONN; return 
cmount->get_client()->mkdir(path, mode); } -extern "C" int ceph_mkdirs(class ceph_mount_info *cmount, const char *path, mode_t mode) +extern "C" int ceph_mkdirs(struct ceph_mount_info *cmount, const char *path, mode_t mode) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->mkdirs(path, mode); } -extern "C" int ceph_rmdir(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_rmdir(struct ceph_mount_info *cmount, const char *path) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -479,7 +479,7 @@ extern "C" int ceph_rmdir(class ceph_mount_info *cmount, const char *path) } // symlinks -extern "C" int ceph_readlink(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_readlink(struct ceph_mount_info *cmount, const char *path, char *buf, loff_t size) { if (!cmount->is_mounted()) @@ -487,7 +487,7 @@ extern "C" int ceph_readlink(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->readlink(path, buf, size); } -extern "C" int ceph_symlink(class ceph_mount_info *cmount, const char *existing, +extern "C" int ceph_symlink(struct ceph_mount_info *cmount, const char *existing, const char *newname) { if (!cmount->is_mounted()) @@ -496,7 +496,7 @@ extern "C" int ceph_symlink(class ceph_mount_info *cmount, const char *existing, } // inode stuff -extern "C" int ceph_stat(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_stat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf) { if (!cmount->is_mounted()) @@ -504,7 +504,7 @@ extern "C" int ceph_stat(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->stat(path, stbuf); } -extern "C" int ceph_lstat(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_lstat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf) { if (!cmount->is_mounted()) @@ -512,7 +512,7 @@ extern "C" int ceph_lstat(class ceph_mount_info *cmount, const char *path, return 
cmount->get_client()->lstat(path, stbuf); } -extern "C" int ceph_setattr(class ceph_mount_info *cmount, const char *relpath, +extern "C" int ceph_setattr(struct ceph_mount_info *cmount, const char *relpath, struct stat *attr, int mask) { if (!cmount->is_mounted()) @@ -521,56 +521,56 @@ extern "C" int ceph_setattr(class ceph_mount_info *cmount, const char *relpath, } // *xattr() calls supporting samba/vfs -extern "C" int ceph_getxattr(class ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size) +extern "C" int ceph_getxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->getxattr(path, name, value, size); } -extern "C" int ceph_lgetxattr(class ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size) +extern "C" int ceph_lgetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->lgetxattr(path, name, value, size); } -extern "C" int ceph_listxattr(class ceph_mount_info *cmount, const char *path, char *list, size_t size) +extern "C" int ceph_listxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->listxattr(path, list, size); } -extern "C" int ceph_llistxattr(class ceph_mount_info *cmount, const char *path, char *list, size_t size) +extern "C" int ceph_llistxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->llistxattr(path, list, size); } -extern "C" int ceph_removexattr(class ceph_mount_info *cmount, const char *path, const char *name) +extern "C" int ceph_removexattr(struct ceph_mount_info *cmount, const char *path, const char *name) { if (!cmount->is_mounted()) 
return -ENOTCONN; return cmount->get_client()->removexattr(path, name); } -extern "C" int ceph_lremovexattr(class ceph_mount_info *cmount, const char *path, const char *name) +extern "C" int ceph_lremovexattr(struct ceph_mount_info *cmount, const char *path, const char *name) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->lremovexattr(path, name); } -extern "C" int ceph_setxattr(class ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags) +extern "C" int ceph_setxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->setxattr(path, name, value, size, flags); } -extern "C" int ceph_lsetxattr(class ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags) +extern "C" int ceph_lsetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -578,33 +578,33 @@ extern "C" int ceph_lsetxattr(class ceph_mount_info *cmount, const char *path, c } /* end xattr support */ -extern "C" int ceph_chmod(class ceph_mount_info *cmount, const char *path, mode_t mode) +extern "C" int ceph_chmod(struct ceph_mount_info *cmount, const char *path, mode_t mode) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->chmod(path, mode); } -extern "C" int ceph_fchmod(class ceph_mount_info *cmount, int fd, mode_t mode) +extern "C" int ceph_fchmod(struct ceph_mount_info *cmount, int fd, mode_t mode) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->fchmod(fd, mode); } -extern "C" int ceph_chown(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_chown(struct ceph_mount_info *cmount, const char *path, int uid, int gid) { if (!cmount->is_mounted()) return -ENOTCONN; return 
cmount->get_client()->chown(path, uid, gid); } -extern "C" int ceph_fchown(class ceph_mount_info *cmount, int fd, +extern "C" int ceph_fchown(struct ceph_mount_info *cmount, int fd, int uid, int gid) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->fchown(fd, uid, gid); } -extern "C" int ceph_lchown(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_lchown(struct ceph_mount_info *cmount, const char *path, int uid, int gid) { if (!cmount->is_mounted()) @@ -613,7 +613,7 @@ extern "C" int ceph_lchown(class ceph_mount_info *cmount, const char *path, } -extern "C" int ceph_utime(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_utime(struct ceph_mount_info *cmount, const char *path, struct utimbuf *buf) { if (!cmount->is_mounted()) @@ -621,7 +621,7 @@ extern "C" int ceph_utime(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->utime(path, buf); } -extern "C" int ceph_truncate(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_truncate(struct ceph_mount_info *cmount, const char *path, loff_t size) { if (!cmount->is_mounted()) @@ -630,7 +630,7 @@ extern "C" int ceph_truncate(class ceph_mount_info *cmount, const char *path, } // file ops -extern "C" int ceph_mknod(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_mknod(struct ceph_mount_info *cmount, const char *path, mode_t mode, dev_t rdev) { if (!cmount->is_mounted()) @@ -638,7 +638,7 @@ extern "C" int ceph_mknod(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->mknod(path, mode, rdev); } -extern "C" int ceph_open(class ceph_mount_info *cmount, const char *path, +extern "C" int ceph_open(struct ceph_mount_info *cmount, const char *path, int flags, mode_t mode) { if (!cmount->is_mounted()) @@ -646,7 +646,7 @@ extern "C" int ceph_open(class ceph_mount_info *cmount, const char *path, return cmount->get_client()->open(path, flags, mode); } -extern "C" int 
ceph_open_layout(class ceph_mount_info *cmount, const char *path, int flags, +extern "C" int ceph_open_layout(struct ceph_mount_info *cmount, const char *path, int flags, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool) { if (!cmount->is_mounted()) @@ -655,14 +655,14 @@ extern "C" int ceph_open_layout(class ceph_mount_info *cmount, const char *path, stripe_count, object_size, data_pool); } -extern "C" int ceph_close(class ceph_mount_info *cmount, int fd) +extern "C" int ceph_close(struct ceph_mount_info *cmount, int fd) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->close(fd); } -extern "C" loff_t ceph_lseek(class ceph_mount_info *cmount, int fd, +extern "C" loff_t ceph_lseek(struct ceph_mount_info *cmount, int fd, loff_t offset, int whence) { if (!cmount->is_mounted()) @@ -670,7 +670,7 @@ extern "C" loff_t ceph_lseek(class ceph_mount_info *cmount, int fd, return cmount->get_client()->lseek(fd, offset, whence); } -extern "C" int ceph_read(class ceph_mount_info *cmount, int fd, char *buf, +extern "C" int ceph_read(struct ceph_mount_info *cmount, int fd, char *buf, loff_t size, loff_t offset) { if (!cmount->is_mounted()) @@ -678,7 +678,7 @@ extern "C" int ceph_read(class ceph_mount_info *cmount, int fd, char *buf, return cmount->get_client()->read(fd, buf, size, offset); } -extern "C" int ceph_write(class ceph_mount_info *cmount, int fd, const char *buf, +extern "C" int ceph_write(struct ceph_mount_info *cmount, int fd, const char *buf, loff_t size, loff_t offset) { if (!cmount->is_mounted()) @@ -686,21 +686,21 @@ extern "C" int ceph_write(class ceph_mount_info *cmount, int fd, const char *buf return cmount->get_client()->write(fd, buf, size, offset); } -extern "C" int ceph_ftruncate(class ceph_mount_info *cmount, int fd, loff_t size) +extern "C" int ceph_ftruncate(struct ceph_mount_info *cmount, int fd, loff_t size) { if (!cmount->is_mounted()) return -ENOTCONN; return 
cmount->get_client()->ftruncate(fd, size); } -extern "C" int ceph_fsync(class ceph_mount_info *cmount, int fd, int syncdataonly) +extern "C" int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->fsync(fd, syncdataonly); } -extern "C" int ceph_fallocate(class ceph_mount_info *cmount, int fd, int mode, +extern "C" int ceph_fallocate(struct ceph_mount_info *cmount, int fd, int mode, loff_t offset, loff_t length) { if (!cmount->is_mounted()) @@ -708,14 +708,14 @@ extern "C" int ceph_fallocate(class ceph_mount_info *cmount, int fd, int mode, return cmount->get_client()->fallocate(fd, mode, offset, length); } -extern "C" int ceph_fstat(class ceph_mount_info *cmount, int fd, struct stat *stbuf) +extern "C" int ceph_fstat(struct ceph_mount_info *cmount, int fd, struct stat *stbuf) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->fstat(fd, stbuf); } -extern "C" int ceph_sync_fs(class ceph_mount_info *cmount) +extern "C" int ceph_sync_fs(struct ceph_mount_info *cmount) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -723,7 +723,7 @@ extern "C" int ceph_sync_fs(class ceph_mount_info *cmount) } -extern "C" int ceph_get_file_stripe_unit(class ceph_mount_info *cmount, int fh) +extern "C" int ceph_get_file_stripe_unit(struct ceph_mount_info *cmount, int fh) { struct ceph_file_layout l; int r; @@ -736,7 +736,7 @@ extern "C" int ceph_get_file_stripe_unit(class ceph_mount_info *cmount, int fh) return l.fl_stripe_unit; } -extern "C" int ceph_get_path_stripe_unit(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_get_path_stripe_unit(struct ceph_mount_info *cmount, const char *path) { struct ceph_file_layout l; int r; @@ -749,7 +749,7 @@ extern "C" int ceph_get_path_stripe_unit(class ceph_mount_info *cmount, const ch return l.fl_stripe_unit; } -extern "C" int ceph_get_file_stripe_count(class ceph_mount_info *cmount, int fh) +extern "C" int 
ceph_get_file_stripe_count(struct ceph_mount_info *cmount, int fh) { struct ceph_file_layout l; int r; @@ -762,7 +762,7 @@ extern "C" int ceph_get_file_stripe_count(class ceph_mount_info *cmount, int fh) return l.fl_stripe_count; } -extern "C" int ceph_get_path_stripe_count(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_get_path_stripe_count(struct ceph_mount_info *cmount, const char *path) { struct ceph_file_layout l; int r; @@ -775,7 +775,7 @@ extern "C" int ceph_get_path_stripe_count(class ceph_mount_info *cmount, const c return l.fl_stripe_count; } -extern "C" int ceph_get_file_object_size(class ceph_mount_info *cmount, int fh) +extern "C" int ceph_get_file_object_size(struct ceph_mount_info *cmount, int fh) { struct ceph_file_layout l; int r; @@ -788,7 +788,7 @@ extern "C" int ceph_get_file_object_size(class ceph_mount_info *cmount, int fh) return l.fl_object_size; } -extern "C" int ceph_get_path_object_size(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_get_path_object_size(struct ceph_mount_info *cmount, const char *path) { struct ceph_file_layout l; int r; @@ -801,7 +801,7 @@ extern "C" int ceph_get_path_object_size(class ceph_mount_info *cmount, const ch return l.fl_object_size; } -extern "C" int ceph_get_file_pool(class ceph_mount_info *cmount, int fh) +extern "C" int ceph_get_file_pool(struct ceph_mount_info *cmount, int fh) { struct ceph_file_layout l; int r; @@ -814,7 +814,7 @@ extern "C" int ceph_get_file_pool(class ceph_mount_info *cmount, int fh) return l.fl_pg_pool; } -extern "C" int ceph_get_path_pool(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_get_path_pool(struct ceph_mount_info *cmount, const char *path) { struct ceph_file_layout l; int r; @@ -827,7 +827,7 @@ extern "C" int ceph_get_path_pool(class ceph_mount_info *cmount, const char *pat return l.fl_pg_pool; } -extern "C" int ceph_get_file_pool_name(class ceph_mount_info *cmount, int fh, char *buf, size_t len) +extern "C" 
int ceph_get_file_pool_name(struct ceph_mount_info *cmount, int fh, char *buf, size_t len) { struct ceph_file_layout l; int r; @@ -846,7 +846,7 @@ extern "C" int ceph_get_file_pool_name(class ceph_mount_info *cmount, int fh, ch return name.length(); } -extern "C" int ceph_get_pool_name(class ceph_mount_info *cmount, int pool, char *buf, size_t len) +extern "C" int ceph_get_pool_name(struct ceph_mount_info *cmount, int pool, char *buf, size_t len) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -859,7 +859,7 @@ extern "C" int ceph_get_pool_name(class ceph_mount_info *cmount, int pool, char return name.length(); } -extern "C" int ceph_get_path_pool_name(class ceph_mount_info *cmount, const char *path, char *buf, size_t len) +extern "C" int ceph_get_path_pool_name(struct ceph_mount_info *cmount, const char *path, char *buf, size_t len) { struct ceph_file_layout l; int r; @@ -878,7 +878,7 @@ extern "C" int ceph_get_path_pool_name(class ceph_mount_info *cmount, const char return name.length(); } -extern "C" int ceph_get_file_layout(class ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool) +extern "C" int ceph_get_file_layout(struct ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool) { struct ceph_file_layout l; int r; @@ -899,7 +899,7 @@ extern "C" int ceph_get_file_layout(class ceph_mount_info *cmount, int fh, int * return 0; } -extern "C" int ceph_get_path_layout(class ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool) +extern "C" int ceph_get_path_layout(struct ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool) { struct ceph_file_layout l; int r; @@ -920,7 +920,7 @@ extern "C" int ceph_get_path_layout(class ceph_mount_info *cmount, const char *p return 0; } -extern "C" int ceph_get_file_replication(class ceph_mount_info *cmount, int fh) +extern 
"C" int ceph_get_file_replication(struct ceph_mount_info *cmount, int fh) { struct ceph_file_layout l; int r; @@ -934,7 +934,7 @@ extern "C" int ceph_get_file_replication(class ceph_mount_info *cmount, int fh) return rep; } -extern "C" int ceph_get_path_replication(class ceph_mount_info *cmount, const char *path) +extern "C" int ceph_get_path_replication(struct ceph_mount_info *cmount, const char *path) { struct ceph_file_layout l; int r; @@ -948,40 +948,40 @@ extern "C" int ceph_get_path_replication(class ceph_mount_info *cmount, const ch return rep; } -extern "C" int ceph_set_default_file_stripe_unit(class ceph_mount_info *cmount, +extern "C" int ceph_set_default_file_stripe_unit(struct ceph_mount_info *cmount, int stripe) { // this option no longer exists return -EOPNOTSUPP; } -extern "C" int ceph_set_default_file_stripe_count(class ceph_mount_info *cmount, +extern "C" int ceph_set_default_file_stripe_count(struct ceph_mount_info *cmount, int count) { // this option no longer exists return -EOPNOTSUPP; } -extern "C" int ceph_set_default_object_size(class ceph_mount_info *cmount, int size) +extern "C" int ceph_set_default_object_size(struct ceph_mount_info *cmount, int size) { // this option no longer exists return -EOPNOTSUPP; } -extern "C" int ceph_set_default_file_replication(class ceph_mount_info *cmount, +extern "C" int ceph_set_default_file_replication(struct ceph_mount_info *cmount, int replication) { // this option no longer exists return -EOPNOTSUPP; } -extern "C" int ceph_set_default_preferred_pg(class ceph_mount_info *cmount, int osd) +extern "C" int ceph_set_default_preferred_pg(struct ceph_mount_info *cmount, int osd) { // this option no longer exists return -EOPNOTSUPP; } -extern "C" int ceph_get_file_extent_osds(class ceph_mount_info *cmount, int fh, +extern "C" int ceph_get_file_extent_osds(struct ceph_mount_info *cmount, int fh, loff_t offset, loff_t *length, int *osds, int nosds) { if (nosds < 0) @@ -1007,7 +1007,7 @@ extern "C" int 
ceph_get_file_extent_osds(class ceph_mount_info *cmount, int fh, return vosds.size(); } -extern "C" int ceph_get_osd_crush_location(class ceph_mount_info *cmount, +extern "C" int ceph_get_osd_crush_location(struct ceph_mount_info *cmount, int osd, char *path, size_t len) { if (!cmount->is_mounted()) @@ -1045,7 +1045,7 @@ extern "C" int ceph_get_osd_crush_location(class ceph_mount_info *cmount, return needed; } -extern "C" int ceph_get_osd_addr(class ceph_mount_info *cmount, int osd, +extern "C" int ceph_get_osd_addr(struct ceph_mount_info *cmount, int osd, struct sockaddr_storage *addr) { if (!cmount->is_mounted()) @@ -1064,7 +1064,7 @@ extern "C" int ceph_get_osd_addr(class ceph_mount_info *cmount, int osd, return 0; } -extern "C" int ceph_get_file_stripe_address(class ceph_mount_info *cmount, int fh, +extern "C" int ceph_get_file_stripe_address(struct ceph_mount_info *cmount, int fh, loff_t offset, struct sockaddr_storage *addr, int naddr) { vector address; @@ -1091,7 +1091,7 @@ extern "C" int ceph_get_file_stripe_address(class ceph_mount_info *cmount, int f return address.size(); } -extern "C" int ceph_localize_reads(class ceph_mount_info *cmount, int val) +extern "C" int ceph_localize_reads(struct ceph_mount_info *cmount, int val) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -1102,33 +1102,33 @@ extern "C" int ceph_localize_reads(class ceph_mount_info *cmount, int val) return 0; } -extern "C" CephContext *ceph_get_mount_context(class ceph_mount_info *cmount) +extern "C" CephContext *ceph_get_mount_context(struct ceph_mount_info *cmount) { return cmount->get_ceph_context(); } -extern "C" int ceph_debug_get_fd_caps(class ceph_mount_info *cmount, int fd) +extern "C" int ceph_debug_get_fd_caps(struct ceph_mount_info *cmount, int fd) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->get_caps_issued(fd); } -extern "C" int ceph_debug_get_file_caps(class ceph_mount_info *cmount, const char *path) +extern "C" int 
ceph_debug_get_file_caps(struct ceph_mount_info *cmount, const char *path) { if (!cmount->is_mounted()) return -ENOTCONN; return cmount->get_client()->get_caps_issued(path); } -extern "C" int ceph_get_stripe_unit_granularity(class ceph_mount_info *cmount) +extern "C" int ceph_get_stripe_unit_granularity(struct ceph_mount_info *cmount) { if (!cmount->is_mounted()) return -ENOTCONN; return CEPH_MIN_STRIPE_UNIT; } -extern "C" int ceph_get_pool_id(class ceph_mount_info *cmount, const char *pool_name) +extern "C" int ceph_get_pool_id(struct ceph_mount_info *cmount, const char *pool_name) { if (!cmount->is_mounted()) return -ENOTCONN; @@ -1145,7 +1145,7 @@ extern "C" int ceph_get_pool_id(class ceph_mount_info *cmount, const char *pool_ return (int)pool_id; } -extern "C" int ceph_get_pool_replication(class ceph_mount_info *cmount, int pool_id) +extern "C" int ceph_get_pool_replication(struct ceph_mount_info *cmount, int pool_id) { if (!cmount->is_mounted()) return -ENOTCONN; diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc index b865c379d1a07..117f84f85f611 100644 --- a/src/mon/MDSMonitor.cc +++ b/src/mon/MDSMonitor.cc @@ -570,10 +570,8 @@ bool MDSMonitor::preprocess_command(MMonCommand *m) } r = 0; } else if (prefix == "mds dump") { - string val; int64_t epocharg; epoch_t epoch; - epoch = epocharg; MDSMap *p = &mdsmap; if (cmd_getval(g_ceph_context, cmdmap, "epoch", epocharg)) { @@ -634,7 +632,6 @@ bool MDSMonitor::preprocess_command(MMonCommand *m) } else if (prefix == "mds tell") { string whostr; cmd_getval(g_ceph_context, cmdmap, "who", whostr); - string args; vectorargs_vec; cmd_getval(g_ceph_context, cmdmap, "args", args_vec); diff --git a/src/mon/MonMap.cc b/src/mon/MonMap.cc index 7012e0545cf07..cf481234befbc 100644 --- a/src/mon/MonMap.cc +++ b/src/mon/MonMap.cc @@ -1,6 +1,7 @@ #include "MonMap.h" +#include #include #include #include diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index c46fa5215d4f6..2c17cca85b7eb 100644 --- 
a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -2659,7 +2659,7 @@ int OSDMonitor::prepare_new_pool(string& name, uint64_t auid, int crush_rule, pi->auid = auid; for (vector::const_iterator i = properties.begin(); i != properties.end(); - i++) { + ++i) { size_t equal = i->find('='); if (equal == string::npos) pi->properties[*i] = string(); @@ -2796,10 +2796,15 @@ int OSDMonitor::prepare_command_pool_set(map &cmdmap, } if (n <= (int)p.get_pg_num()) { ss << "specified pg_num " << n << " <= current " << p.get_pg_num(); - } else if (!mon->pgmon()->pg_map.creating_pgs.empty()) { - ss << "currently creating pgs, wait"; - return -EAGAIN; } else { + for(set::iterator i = mon->pgmon()->pg_map.creating_pgs.begin(); + i != mon->pgmon()->pg_map.creating_pgs.end(); + ++i) { + if (i->m_pool == static_cast(pool)) { + ss << "currently creating pgs, wait"; + return -EAGAIN; + } + } p.set_pg_num(n); ss << "set pool " << pool << " pg_num to " << n; } @@ -2812,10 +2817,15 @@ int OSDMonitor::prepare_command_pool_set(map &cmdmap, ss << "specified pgp_num must > 0, but you set to " << n; } else if (n > (int)p.get_pg_num()) { ss << "specified pgp_num " << n << " > pg_num " << p.get_pg_num(); - } else if (!mon->pgmon()->pg_map.creating_pgs.empty()) { - ss << "still creating pgs, wait"; - return -EAGAIN; } else { + for(set::iterator i = mon->pgmon()->pg_map.creating_pgs.begin(); + i != mon->pgmon()->pg_map.creating_pgs.end(); + ++i) { + if (i->m_pool == static_cast(pool)) { + ss << "currently creating pgs, wait"; + return -EAGAIN; + } + } p.set_pgp_num(n); ss << "set pool " << pool << " pgp_num to " << n; } @@ -2938,7 +2948,7 @@ bool OSDMonitor::prepare_command(MMonCommand *m) // sanity check: test some inputs to make sure this map isn't totally broken dout(10) << " testing map" << dendl; stringstream ess; - CrushTester tester(crush, ess, 1); + CrushTester tester(crush, ess); tester.test(); dout(10) << " result " << ess.str() << dendl; @@ -4367,7 +4377,7 @@ int 
OSDMonitor::_prepare_rename_pool(int64_t pool, string newname) for (map::iterator p = pending_inc.new_pool_names.begin(); p != pending_inc.new_pool_names.end(); ++p) { - if (p->second == newname && (uint64_t)p->first != pool) { + if (p->second == newname && p->first != pool) { return -EEXIST; } } diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc index ab4c885df4c02..2ba8402b34f60 100644 --- a/src/mon/PGMap.cc +++ b/src/mon/PGMap.cc @@ -787,7 +787,7 @@ void PGMap::print_osd_perf_stats(std::ostream *ss) const } void PGMap::recovery_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum) const + const pool_stat_t& delta_sum) const { bool first = true; if (delta_sum.stats.sum.num_objects_degraded) { @@ -825,7 +825,7 @@ void PGMap::recovery_summary(Formatter *f, ostream *out, } void PGMap::recovery_rate_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum, + const pool_stat_t& delta_sum, utime_t delta_stamp) const { // make non-negative; we can get negative values if osds send @@ -886,7 +886,7 @@ void PGMap::pool_recovery_summary(Formatter *f, ostream *out, } void PGMap::client_io_rate_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum, + const pool_stat_t& delta_sum, utime_t delta_stamp) const { pool_stat_t pos_delta = delta_sum; diff --git a/src/mon/PGMap.h b/src/mon/PGMap.h index c8ce7fd973eba..8a931ecbcca67 100644 --- a/src/mon/PGMap.h +++ b/src/mon/PGMap.h @@ -244,12 +244,12 @@ class PGMap { void print_osd_perf_stats(std::ostream *ss) const; void recovery_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum) const; + const pool_stat_t& delta_sum) const; void overall_recovery_summary(Formatter *f, ostream *out) const; void pool_recovery_summary(Formatter *f, ostream *out, uint64_t poolid) const; void recovery_rate_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum, + const pool_stat_t& delta_sum, utime_t delta_stamp) const; void overall_recovery_rate_summary(Formatter *f, ostream *out) const; void 
pool_recovery_rate_summary(Formatter *f, ostream *out, @@ -259,7 +259,7 @@ class PGMap { * given @p delta_sum pool over a given @p delta_stamp period of time. */ void client_io_rate_summary(Formatter *f, ostream *out, - pool_stat_t delta_sum, + const pool_stat_t& delta_sum, utime_t delta_stamp) const; /** * Obtain a formatted/plain output for the overall client I/O, which is diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc index ffac501aaf20d..89a55b393db7c 100644 --- a/src/os/FileStore.cc +++ b/src/os/FileStore.cc @@ -4273,6 +4273,14 @@ int FileStore::_collection_move_rename(coll_t oldcid, const ghobject_t& oldoid, int r = 0; int dstcmp, srccmp; + if (replaying) { + /* If the destination collection doesn't exist during replay, + * we need to delete the src object and continue on + */ + if (!collection_exists(c)) + goto out_rm_src; + } + dstcmp = _check_replay_guard(c, o, spos); if (dstcmp < 0) goto out_rm_src; diff --git a/src/os/ObjectStore.cc b/src/os/ObjectStore.cc index 1a1bbcb0b67de..327c64167d5cf 100644 --- a/src/os/ObjectStore.cc +++ b/src/os/ObjectStore.cc @@ -504,7 +504,7 @@ int ObjectStore::collection_list(coll_t c, vector& o) int ret = collection_list(c, go); if (ret == 0) { o.reserve(go.size()); - for (vector::iterator i = go.begin(); i != go.end() ; i++) + for (vector::iterator i = go.begin(); i != go.end() ; ++i) o.push_back(i->hobj); } return ret; @@ -520,7 +520,7 @@ int ObjectStore::collection_list_partial(coll_t c, hobject_t start, if (ret == 0) { *next = gnext.hobj; ls->reserve(go.size()); - for (vector::iterator i = go.begin(); i != go.end() ; i++) + for (vector::iterator i = go.begin(); i != go.end() ; ++i) ls->push_back(i->hobj); } return ret; @@ -534,7 +534,7 @@ int ObjectStore::collection_list_range(coll_t c, hobject_t start, hobject_t end, int ret = collection_list_range(c, gstart, gend, seq, &go); if (ret == 0) { ls->reserve(go.size()); - for (vector::iterator i = go.begin(); i != go.end() ; i++) + for (vector::iterator i = 
go.begin(); i != go.end() ; ++i) ls->push_back(i->hobj); } return ret; diff --git a/src/os/WBThrottle.cc b/src/os/WBThrottle.cc index e02c17677bbde..9430c4fd3cde4 100644 --- a/src/os/WBThrottle.cc +++ b/src/os/WBThrottle.cc @@ -1,6 +1,8 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab +#include "acconfig.h" + #include "os/WBThrottle.h" #include "common/perf_counters.h" @@ -146,8 +148,10 @@ void *WBThrottle::entry() clearing = wb.get<0>(); lock.Unlock(); ::fdatasync(**wb.get<1>()); +#ifdef HAVE_POSIX_FADVISE if (wb.get<2>().nocache) posix_fadvise(**wb.get<1>(), 0, 0, POSIX_FADV_DONTNEED); +#endif lock.Lock(); clearing = ghobject_t(); cur_ios -= wb.get<2>().ios; diff --git a/src/os/chain_xattr.cc b/src/os/chain_xattr.cc index 8ca815689ed36..c020c9db84398 100644 --- a/src/os/chain_xattr.cc +++ b/src/os/chain_xattr.cc @@ -388,6 +388,10 @@ int chain_listxattr(const char *fn, char *names, size_t len) { int chain_flistxattr(int fd, char *names, size_t len) { int r; + char *p; + const char * end; + char *dest; + char *dest_end; if (!len) return sys_flistxattr(fd, names, len) * 2; @@ -403,12 +407,12 @@ int chain_flistxattr(int fd, char *names, size_t len) { r = sys_flistxattr(fd, full_buf, total_len); if (r < 0) - return r; + goto done; - char *p = full_buf; - const char *end = full_buf + r; - char *dest = names; - char *dest_end = names + len; + p = full_buf; + end = full_buf + r; + dest = names; + dest_end = names + len; while (p < end) { char name[CHAIN_XATTR_MAX_NAME_LEN * 2 + 16]; diff --git a/src/osd/ErasureCodePlugin.cc b/src/osd/ErasureCodePlugin.cc index d8b9ae0fbbd81..5d0b6904e1ea2 100644 --- a/src/osd/ErasureCodePlugin.cc +++ b/src/osd/ErasureCodePlugin.cc @@ -45,7 +45,7 @@ ErasureCodePluginRegistry::~ErasureCodePluginRegistry() { for (std::map::iterator i = plugins.begin(); i != plugins.end(); - i++) { + ++i) { void *library = i->second->library; delete i->second; dlclose(library); @@ -112,6 +112,7 @@ int 
ErasureCodePluginRegistry::load(const std::string &plugin_name, if (r != 0) { derr << "erasure_code_init(" << plugin_name << "): " << strerror(-r) << dendl; + dlclose(library); return r; } } else { diff --git a/src/osd/ErasureCodePluginJerasure/ErasureCodeJerasure.cc b/src/osd/ErasureCodePluginJerasure/ErasureCodeJerasure.cc index f2be1ed06e711..fe656e58ee00f 100644 --- a/src/osd/ErasureCodePluginJerasure/ErasureCodeJerasure.cc +++ b/src/osd/ErasureCodePluginJerasure/ErasureCodeJerasure.cc @@ -54,7 +54,7 @@ int ErasureCodeJerasure::minimum_to_decode(const set &want_to_read, return -EIO; set::iterator i; unsigned j; - for (i = available_chunks.begin(), j = 0; j < (unsigned)k; i++, j++) + for (i = available_chunks.begin(), j = 0; j < (unsigned)k; ++i, j++) minimum->insert(*i); } return 0; @@ -67,7 +67,7 @@ int ErasureCodeJerasure::minimum_to_decode_with_cost(const set &want_to_rea set available_chunks; for (map::const_iterator i = available.begin(); i != available.end(); - i++) + ++i) available_chunks.insert(i->first); return minimum_to_decode(want_to_read, available_chunks, minimum); } diff --git a/src/osd/ErasureCodePluginJerasure/galois.c b/src/osd/ErasureCodePluginJerasure/galois.c index be8be59affa0c..0de6fbd334cfe 100755 --- a/src/osd/ErasureCodePluginJerasure/galois.c +++ b/src/osd/ErasureCodePluginJerasure/galois.c @@ -693,7 +693,7 @@ void galois_w32_region_multiply(char *region, /* Region to multiply */ nbytes /= sizeof(int); if (galois_split_w8[0]== NULL) { - if (galois_create_split_w8_tables(8) < 0) { + if (galois_create_split_w8_tables() < 0) { fprintf(stderr, "galois_32_region_multiply -- couldn't make split multiplication tables\n"); exit(1); } diff --git a/src/osd/ErasureCodePluginJerasure/jerasure.c b/src/osd/ErasureCodePluginJerasure/jerasure.c index 9efae02e5fb21..e5df475001c1c 100755 --- a/src/osd/ErasureCodePluginJerasure/jerasure.c +++ b/src/osd/ErasureCodePluginJerasure/jerasure.c @@ -276,7 +276,7 @@ int *jerasure_matrix_to_bitmatrix(int k, int 
m, int w, int *matrix) int rowelts, rowindex, colindex, elt, i, j, l, x; bitmatrix = talloc(int, k*m*w*w); - if (matrix == NULL) { return NULL; } + if (bitmatrix == NULL) { return NULL; } rowelts = k * w; rowindex = 0; @@ -839,7 +839,11 @@ static int **jerasure_generate_decoding_schedule(int k, int m, int w, int *bitma row_ids = talloc(int, k+m); ind_to_row = talloc(int, k+m); - if (set_up_ids_for_scheduled_decoding(k, m, erasures, row_ids, ind_to_row) < 0) return NULL; + if (set_up_ids_for_scheduled_decoding(k, m, erasures, row_ids, ind_to_row) < 0) { + free(row_ids); + free(ind_to_row); + return NULL; + } /* Now, we're going to create one decoding matrix which is going to decode everything with one call. The hope is that the scheduler diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index eb5191f770e15..d63a0a1c11578 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -900,7 +900,7 @@ OSD::OSD(CephContext *cct_, int id, Messenger *internal_messenger, Messenger *ex replay_queue_lock("OSD::replay_queue_lock"), snap_trim_wq(this, cct->_conf->osd_snap_trim_thread_timeout, &disk_tp), scrub_wq(this, cct->_conf->osd_scrub_thread_timeout, &disk_tp), - scrub_finalize_wq(this, cct->_conf->osd_scrub_finalize_thread_timeout, &op_tp), + scrub_finalize_wq(cct->_conf->osd_scrub_finalize_thread_timeout, &op_tp), rep_scrub_wq(this, cct->_conf->osd_scrub_thread_timeout, &disk_tp), remove_wq(store, cct->_conf->osd_remove_thread_timeout, &disk_tp), next_removal_seq(0), @@ -2201,9 +2201,10 @@ void OSD::handle_pg_peering_evt( int role = osdmap->calc_pg_role(whoami, acting, acting.size()); pg_history_t history = info.history; - project_pg_history(info.pgid, history, epoch, up, acting); + bool valid_history = project_pg_history( + info.pgid, history, epoch, up, acting); - if (epoch < history.same_interval_since) { + if (!valid_history || epoch < history.same_interval_since) { dout(10) << "get_or_create_pg " << info.pgid << " acting changed in " << history.same_interval_since << " (msg from 
" << epoch << ")" << dendl; return; @@ -2388,7 +2389,7 @@ void OSD::calc_priors_during(pg_t pgid, epoch_t start, epoch_t end, set& ps * Fill in the passed history so you know same_interval_since, same_up_since, * and same_primary_since. */ -void OSD::project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from, +bool OSD::project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from, const vector& currentup, const vector& currentacting) { @@ -2402,7 +2403,11 @@ void OSD::project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from, e > from; e--) { // verify during intermediate epoch (e-1) - OSDMapRef oldmap = get_map(e-1); + OSDMapRef oldmap = service.try_get_map(e-1); + if (!oldmap) { + dout(15) << __func__ << ": found map gap, returning false" << dendl; + return false; + } assert(oldmap->have_pg_pool(pgid.pool())); vector up, acting; @@ -2452,6 +2457,7 @@ void OSD::project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from, } dout(15) << "project_pg_history end " << h << dendl; + return true; } // ------------------------------------- @@ -5441,22 +5447,6 @@ void OSD::advance_map(ObjectStore::Transaction& t, C_Contexts *tfin) waiting_for_pg.erase(p++); } } - map >::iterator q = - peering_wait_for_split.begin(); - while (q != peering_wait_for_split.end()) { - pg_t pgid = q->first; - - // am i still primary? 
- vector acting; - int nrep = osdmap->pg_to_acting_osds(pgid, acting); - int role = osdmap->calc_pg_role(whoami, acting, nrep); - if (role >= 0) { - ++q; // still me - } else { - dout(10) << " discarding waiting ops for " << pgid << dendl; - peering_wait_for_split.erase(q++); - } - } } void OSD::consume_map() @@ -5935,7 +5925,12 @@ void OSD::handle_pg_create(OpRequestRef op) utime_t now = ceph_clock_now(NULL); history.last_scrub_stamp = now; history.last_deep_scrub_stamp = now; - project_pg_history(pgid, history, created, up, acting); + bool valid_history = + project_pg_history(pgid, history, created, up, acting); + /* the pg creation message must have come from a mon and therefore + * cannot be on the other side of a map gap + */ + assert(valid_history); // register. creating_pgs[pgid].history = history; @@ -6547,9 +6542,11 @@ void OSD::handle_pg_query(OpRequestRef op) // same primary? pg_history_t history = it->second.history; - project_pg_history(pgid, history, it->second.epoch_sent, up, acting); + bool valid_history = + project_pg_history(pgid, history, it->second.epoch_sent, up, acting); - if (it->second.epoch_sent < history.same_interval_since) { + if (!valid_history || + it->second.epoch_sent < history.same_interval_since) { dout(10) << " pg " << pgid << " dne, and pg has changed in " << history.same_interval_since << " (msg from " << it->second.epoch_sent << ")" << dendl; @@ -6613,9 +6610,11 @@ void OSD::handle_pg_remove(OpRequestRef op) pg_history_t history = pg->info.history; vector up, acting; osdmap->pg_to_up_acting_osds(pgid, up, acting); - project_pg_history(pg->info.pgid, history, pg->get_osdmap()->get_epoch(), - up, acting); - if (history.same_interval_since <= m->get_epoch()) { + bool valid_history = + project_pg_history(pg->info.pgid, history, pg->get_osdmap()->get_epoch(), + up, acting); + if (valid_history && + history.same_interval_since <= m->get_epoch()) { assert(pg->get_primary() == m->get_source().num()); PGRef _pg(pg); _remove_pg(pg); diff 
--git a/src/osd/OSD.h b/src/osd/OSD.h index f7559da3be53a..11ad2b89399be 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -440,7 +440,7 @@ class OSDService { bool force_new); ObjecterDispatcher(OSDService *o) : Dispatcher(cct), osd(o) {} } objecter_dispatcher; - friend class ObjecterDispatcher; + friend struct ObjecterDispatcher; // -- Watch -- @@ -1191,8 +1191,12 @@ class OSD : public Dispatcher, void build_past_intervals_parallel(); void calc_priors_during(pg_t pgid, epoch_t start, epoch_t end, set& pset); - void project_pg_history(pg_t pgid, pg_history_t& h, epoch_t from, - const vector& lastup, const vector& lastacting); + + /// project pg history from from to now + bool project_pg_history( + pg_t pgid, pg_history_t& h, epoch_t from, + const vector& lastup, const vector& lastacting + ); ///< @return false if there was a map gap between from and now void wake_pg_waiters(pg_t pgid) { if (waiting_for_pg.count(pgid)) { @@ -1555,12 +1559,11 @@ class OSD : public Dispatcher, struct ScrubFinalizeWQ : public ThreadPool::WorkQueue { private: - OSD *osd; xlist scrub_finalize_queue; public: - ScrubFinalizeWQ(OSD *o, time_t ti, ThreadPool *tp) - : ThreadPool::WorkQueue("OSD::ScrubFinalizeWQ", ti, ti*10, tp), osd(o) {} + ScrubFinalizeWQ(time_t ti, ThreadPool *tp) + : ThreadPool::WorkQueue("OSD::ScrubFinalizeWQ", ti, ti*10, tp) {} bool _empty() { return scrub_finalize_queue.empty(); diff --git a/src/osd/PG.cc b/src/osd/PG.cc index e92013abdc733..8207a675bce4a 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -2399,13 +2399,7 @@ void PG::log_weirdness() << " != info.last_update " << info.last_update << "\n"; - if (pg_log.get_log().empty()) { - // shoudl it be? 
- if (pg_log.get_head() != pg_log.get_tail()) - osd->clog.error() << info.pgid - << " log bound mismatch, empty but (" << pg_log.get_tail() << "," - << pg_log.get_head() << "]\n"; - } else { + if (!pg_log.get_log().empty()) { // sloppy check if ((pg_log.get_log().log.begin()->version <= pg_log.get_tail())) osd->clog.error() << info.pgid @@ -4679,19 +4673,11 @@ ostream& operator<<(ostream& out, const PG& pg) pg.pg_log.get_head() != pg.info.last_update) out << " (info mismatch, " << pg.pg_log.get_log() << ")"; - if (pg.pg_log.get_log().empty()) { - // shoudl it be? - if (pg.pg_log.get_head().version - pg.pg_log.get_tail().version != 0) { - out << " (log bound mismatch, empty)"; - } - } else { - if ((pg.pg_log.get_log().log.begin()->version <= pg.pg_log.get_tail()) || // sloppy check - (pg.pg_log.get_log().log.rbegin()->version != pg.pg_log.get_head() && - !(pg.pg_log.get_head() == pg.pg_log.get_tail()))) { + if (!pg.pg_log.get_log().empty()) { + if ((pg.pg_log.get_log().log.begin()->version <= pg.pg_log.get_tail())) { out << " (log bound mismatch, actual=[" << pg.pg_log.get_log().log.begin()->version << "," << pg.pg_log.get_log().log.rbegin()->version << "]"; - //out << "len=" << pg.log.log.size(); out << ")"; } } diff --git a/src/osd/PGLog.cc b/src/osd/PGLog.cc index 130a8e0778f71..9f6ca1f70c3bc 100644 --- a/src/osd/PGLog.cc +++ b/src/osd/PGLog.cc @@ -360,7 +360,7 @@ void PGLog::rewind_divergent_log(ObjectStore::Transaction& t, eversion_t newhead } --p; mark_dirty_from(p->version); - if (p->version == newhead) { + if (p->version <= newhead) { ++p; divergent.splice(divergent.begin(), log.log, p, log.log.end()); break; @@ -422,8 +422,6 @@ void PGLog::merge_log(ObjectStore::Transaction& t, log.index(*to); dout(15) << *to << dendl; } - assert(to != olog.log.end() || - (olog.head == info.last_update)); // splice into our log. 
log.log.splice(log.log.begin(), diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc index 500042d16e3a4..10ab6a13df6ed 100644 --- a/src/osd/ReplicatedPG.cc +++ b/src/osd/ReplicatedPG.cc @@ -655,19 +655,38 @@ void ReplicatedPG::do_pg_op(OpRequestRef op) map::const_iterator missing_iter = pg_log.get_missing().missing.lower_bound(current); vector::iterator ls_iter = sentries.begin(); + hobject_t _max = hobject_t::get_max(); while (1) { - if (ls_iter == sentries.end()) { - break; - } + const hobject_t &mcand = + missing_iter == pg_log.get_missing().missing.end() ? + _max : + missing_iter->first; + const hobject_t &lcand = + ls_iter == sentries.end() ? + _max : + *ls_iter; hobject_t candidate; - if (missing_iter == pg_log.get_missing().missing.end() || - *ls_iter < missing_iter->first) { - candidate = *(ls_iter++); + if (mcand == lcand) { + candidate = mcand; + if (!mcand.is_max()) { + ls_iter++; + missing_iter++; + } + } else if (mcand < lcand) { + candidate = mcand; + assert(!mcand.is_max()); + ++missing_iter; } else { - candidate = (missing_iter++)->first; + candidate = lcand; + assert(!lcand.is_max()); + ++ls_iter; } + if (candidate >= next) { + break; + } + if (response.entries.size() == list_size) { next = candidate; break; @@ -8110,7 +8129,6 @@ int ReplicatedPG::recover_backfill( } PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op(); - map > pushes; for (map >::iterator i = to_push.begin(); @@ -8142,6 +8160,17 @@ int ReplicatedPG::recover_backfill( assert(i->first > new_last_backfill); new_last_backfill = i->first; } + + /* If last_backfill is snapdir, we know that head necessarily cannot exist, + * therefore it's safe to bump the snap up to NOSNAP. 
This is necessary + * since we need avoid having SNAPDIR backfilled and HEAD not backfilled + * since a transaction on HEAD might change SNAPDIR + */ + if (new_last_backfill.is_snapdir()) + new_last_backfill = new_last_backfill.get_head(); + if (last_backfill_started.is_snapdir()) + last_backfill_started = last_backfill_started.get_head(); + assert(!pending_backfill_updates.empty() || new_last_backfill == last_backfill_started); if (pending_backfill_updates.empty() && diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h index 2852cf40d3619..439c6a9e5c079 100644 --- a/src/osd/ReplicatedPG.h +++ b/src/osd/ReplicatedPG.h @@ -926,7 +926,7 @@ class ReplicatedPG : public PG, public PGBackend::Listener { void cancel_copy(CopyOpRef cop, bool requeue); void cancel_copy_ops(bool requeue); - friend class C_Copyfrom; + friend struct C_Copyfrom; // -- scrub -- virtual void _scrub(ScrubMap& map); diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc index 05b83c4af21bd..0cb3c0c64897b 100644 --- a/src/osd/osd_types.cc +++ b/src/osd/osd_types.cc @@ -1062,7 +1062,7 @@ ostream& operator<<(ostream& out, const pg_pool_t& p) out << " max_bytes " << p.quota_max_bytes; if (p.quota_max_objects) out << " max_objects " << p.quota_max_objects; - if (p.tiers.size()) + if (!p.tiers.empty()) out << " tiers " << p.tiers; if (p.is_tier()) out << " tier_of " << p.tier_of; diff --git a/src/osdc/Objecter.cc b/src/osdc/Objecter.cc index d2c574d982e1e..734df5e9e6900 100644 --- a/src/osdc/Objecter.cc +++ b/src/osdc/Objecter.cc @@ -2406,7 +2406,6 @@ Objecter::RequestStateHook::RequestStateHook(Objecter *objecter) : bool Objecter::RequestStateHook::call(std::string command, cmdmap_t& cmdmap, std::string format, bufferlist& out) { - stringstream ss; Formatter *f = new_formatter(format); m_objecter->client_lock.Lock(); m_objecter->dump_requests(f); diff --git a/src/rbd.cc b/src/rbd.cc index 147eb2c5138e9..41cd243735520 100644 --- a/src/rbd.cc +++ b/src/rbd.cc @@ -1643,12 +1643,12 @@ static int 
do_kernel_add(const char *poolname, const char *imgname, } if (read_only) - oss << " ro"; + oss << " ro,"; else - oss << " rw"; + oss << " "; const char *user = g_conf->name.get_id().c_str(); - oss << ",name=" << user; + oss << "name=" << user; char key_name[strlen(user) + strlen("client.") + 1]; snprintf(key_name, sizeof(key_name), "client.%s", user); diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc index 22e94cdccf9e6..2e69d532cd151 100644 --- a/src/rgw/rgw_common.cc +++ b/src/rgw/rgw_common.cc @@ -659,13 +659,12 @@ bool verify_object_permission(struct req_state *s, int perm) return verify_object_permission(s, s->bucket_acl, s->object_acl, perm); } -static char hex_to_num(char c) +class HexTable { - static char table[256]; - static bool initialized = false; - + char table[256]; - if (!initialized) { +public: + HexTable() { memset(table, -1, sizeof(table)); int i; for (i = '0'; i<='9'; i++) @@ -675,7 +674,16 @@ static char hex_to_num(char c) for (i = 'a'; i<='f'; i++) table[i] = i - 'a' + 0xa; } - return table[(int)c]; + + char to_num(char c) { + return table[(int)c]; + } +}; + +static char hex_to_num(char c) +{ + static HexTable hex_table; + return hex_table.to_num(c); } bool url_decode(string& src_str, string& dest_str) diff --git a/src/rgw/rgw_http_client.cc b/src/rgw/rgw_http_client.cc index 314e80b9ef26c..1c6b6d4d71ba0 100644 --- a/src/rgw/rgw_http_client.cc +++ b/src/rgw/rgw_http_client.cc @@ -234,7 +234,7 @@ static int do_curl_wait(CephContext *cct, CURLM *handle) int RGWHTTPClient::process_request(void *handle, bool wait_for_data, bool *done) { - multi_req_data *req_data = (multi_req_data *)handle; + multi_req_data *req_data = static_cast(handle); int still_running; int mstatus; @@ -282,7 +282,7 @@ int RGWHTTPClient::complete_request(void *handle) do { ret = process_request(handle, true, &done); } while (!done && !ret); - multi_req_data *req_data = (multi_req_data *)handle; + multi_req_data *req_data = static_cast(handle); delete req_data; return 
ret; diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc index 5fbecf88cab0d..82568ff390924 100644 --- a/src/rgw/rgw_main.cc +++ b/src/rgw/rgw_main.cc @@ -65,7 +65,7 @@ using namespace std; -static sighandler_t sighandler_alrm; +static sig_t sighandler_alrm; class RGWProcess; diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index c750276596f83..bd73a239a4bf1 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -418,6 +418,11 @@ int RGWOp::verify_op_mask() return -EPERM; } + if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone.is_master) { + ldout(s->cct, 5) << "NOTICE: modify request to a non-master zone by a non-system user, permission denied" << dendl; + return -EPERM; + } + return 0; } diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc index 66609ca723c28..89611f5858769 100644 --- a/src/rgw/rgw_quota.cc +++ b/src/rgw/rgw_quota.cc @@ -126,7 +126,6 @@ class AsyncRefreshHandler : public RGWGetBucketStats_CB { int AsyncRefreshHandler::init_fetch() { ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl; - map bucket_stats; int r = store->get_bucket_stats_async(bucket, this); if (r < 0) { ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl; diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 4d6f8ef45301a..55d4b92bcbdb0 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -362,6 +362,10 @@ int RGWZoneParams::init(CephContext *cct, RGWRados *store, RGWRegion& region) return -EIO; } + is_master = (name == region.master_zone) || (region.master_zone.empty() && name == "default"); + + ldout(cct, 2) << "zone " << name << " is " << (is_master ? 
"" : "NOT ") << "master" << dendl; + return 0; } diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h index 874492ffe692d..476572ce3f6c9 100644 --- a/src/rgw/rgw_rados.h +++ b/src/rgw/rgw_rados.h @@ -428,11 +428,14 @@ struct RGWZoneParams { rgw_bucket user_uid_pool; string name; + bool is_master; RGWAccessKey system_key; map placement_pools; + RGWZoneParams() : is_master(false) {} + static int get_pool_name(CephContext *cct, string *pool_name); void init_name(CephContext *cct, RGWRegion& region); int init(CephContext *cct, RGWRados *store, RGWRegion& region); diff --git a/src/rgw/rgw_resolve.cc b/src/rgw/rgw_resolve.cc index 9a8b14b2e0185..471ac3f167816 100644 --- a/src/rgw/rgw_resolve.cc +++ b/src/rgw/rgw_resolve.cc @@ -3,6 +3,12 @@ #include #include +#include "acconfig.h" + +#ifdef HAVE_ARPA_NAMESER_COMPAT_H +#include +#endif + #include "rgw_common.h" #include "rgw_resolve.h" diff --git a/src/rgw/rgw_rest_metadata.cc b/src/rgw/rgw_rest_metadata.cc index 5036235ebd2b6..afd5c7b6cc055 100644 --- a/src/rgw/rgw_rest_metadata.cc +++ b/src/rgw/rgw_rest_metadata.cc @@ -31,7 +31,6 @@ static inline void frame_metadata_key(req_state *s, string& out) { bool exists; string key = s->info.args.get("key", &exists); - string metadata_key; string section; if (!s->bucket_name_str.empty()) { section = s->bucket_name_str; diff --git a/src/rgw/rgw_rest_replica_log.cc b/src/rgw/rgw_rest_replica_log.cc index 600a8edb78c5b..2543f32fba6e8 100644 --- a/src/rgw/rgw_rest_replica_log.cc +++ b/src/rgw/rgw_rest_replica_log.cc @@ -171,7 +171,9 @@ static int bucket_instance_to_bucket(RGWRados *store, string& bucket_instance, r int r = store->get_bucket_instance_info(NULL, bucket_instance, bucket_info, &mtime, NULL); if (r < 0) { - dout(5) << "could not get bucket instance info for bucket=" << bucket_instance << dendl; + dout(5) << "could not get bucket instance info for bucket=" << bucket_instance << ": " << cpp_strerror(r) << dendl; + if (r == -ENOENT) + return r; return -EINVAL; } diff --git 
a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc index dc529e3d48d4b..e4462ec11e05e 100644 --- a/src/rgw/rgw_user.cc +++ b/src/rgw/rgw_user.cc @@ -2358,7 +2358,7 @@ class RGWUserMetadataHandler : public RGWMetadataHandler { } int list_keys_next(void *handle, int max, list& keys, bool *truncated) { - list_keys_info *info = (list_keys_info *)handle; + list_keys_info *info = static_cast(handle); string no_filter; @@ -2387,7 +2387,7 @@ class RGWUserMetadataHandler : public RGWMetadataHandler { } void list_keys_complete(void *handle) { - list_keys_info *info = (list_keys_info *)handle; + list_keys_info *info = static_cast(handle); delete info; } }; diff --git a/src/test/Makefile.am b/src/test/Makefile.am index 0ef75ac87ea43..da9f33b3d42d2 100644 --- a/src/test/Makefile.am +++ b/src/test/Makefile.am @@ -859,6 +859,13 @@ bin_DEBUGPROGRAMS += ceph_test_keyvaluedb_iterators ceph_test_cfuse_cache_invalidate_SOURCES = test/test_cfuse_cache_invalidate.cc bin_DEBUGPROGRAMS += ceph_test_cfuse_cache_invalidate +ceph_test_c_headers_SOURCES = test/test_c_headers.c +ceph_test_c_headers_LDADD = $(LIBRADOS) $(LIBCEPHFS) +bin_DEBUGPROGRAMS += ceph_test_c_headers + +ceph_test_get_blkdev_size_SOURCES = test/test_get_blkdev_size.cc +ceph_test_get_blkdev_size_LDADD = $(LIBCOMMON) +bin_DEBUGPROGRAMS += ceph_test_get_blkdev_size noinst_HEADERS += \ test/osd/RadosModel.h \ diff --git a/src/test/ceph_crypto.cc b/src/test/ceph_crypto.cc index 2c934fa848c3c..11d410182a4d6 100644 --- a/src/test/ceph_crypto.cc +++ b/src/test/ceph_crypto.cc @@ -135,6 +135,8 @@ void do_simple_crypto() { exit(0); } +#if GTEST_HAS_DEATH_TEST TEST_F(ForkDeathTest, MD5) { ASSERT_EXIT(do_simple_crypto(), ::testing::ExitedWithCode(0), "^$"); } +#endif //GTEST_HAS_DEATH_TEST diff --git a/src/test/cli/osdmaptool/clobber.t b/src/test/cli/osdmaptool/clobber.t index 9bbe4d4ceeb7f..bbec5f0de636a 100644 --- a/src/test/cli/osdmaptool/clobber.t +++ b/src/test/cli/osdmaptool/clobber.t @@ -3,6 +3,7 @@ osdmaptool: writing epoch 1 to 
myosdmap $ ORIG_FSID="$(osdmaptool --print myosdmap|grep ^fsid)" + osdmaptool: osdmap file 'myosdmap' $ osdmaptool --createsimple 3 myosdmap osdmaptool: osdmap file 'myosdmap' @@ -27,6 +28,7 @@ $ NEW_FSID="$(osdmaptool --print myosdmap|grep ^fsid)" + osdmaptool: osdmap file 'myosdmap' $ [ "$ORIG_FSID" = "$NEW_FSID" ] $ osdmaptool --createsimple 1 --clobber myosdmap @@ -49,6 +51,7 @@ $ NEW_FSID="$(osdmaptool --print myosdmap|grep ^fsid)" + osdmaptool: osdmap file 'myosdmap' #TODO --clobber should probably set new fsid, remove the [1] $ [ "$ORIG_FSID" != "$NEW_FSID" ] [1] diff --git a/src/test/cli/osdmaptool/create-racks.t b/src/test/cli/osdmaptool/create-racks.t index 92bc995a227c5..f686ef4c05131 100644 --- a/src/test/cli/osdmaptool/create-racks.t +++ b/src/test/cli/osdmaptool/create-racks.t @@ -1,4 +1,5 @@ $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks > /dev/null + osdmaptool: osdmap file 'om' $ osdmaptool --test-map-pg 0.0 om osdmaptool: osdmap file 'om' parsed '0.0' -> 0.0 diff --git a/src/test/common/test_sharedptr_registry.cc b/src/test/common/test_sharedptr_registry.cc index b1713a9bd9ff6..6121b6335b8d3 100644 --- a/src/test/common/test_sharedptr_registry.cc +++ b/src/test/common/test_sharedptr_registry.cc @@ -32,7 +32,9 @@ using namespace std::tr1; class SharedPtrRegistryTest : public SharedPtrRegistry { public: Mutex &get_lock() { return lock; } - map > &get_contents() { return contents; } + map, int*> > &get_contents() { + return contents; + } }; class SharedPtrRegistry_all : public ::testing::Test { @@ -125,9 +127,9 @@ TEST_F(SharedPtrRegistry_all, wait_lookup_or_create) { unsigned int key = 1; { shared_ptr ptr(new int); - registry.get_contents()[key] = ptr; + registry.get_contents()[key] = make_pair(ptr, ptr.get()); } - EXPECT_FALSE(registry.get_contents()[key].lock()); + EXPECT_FALSE(registry.get_contents()[key].first.lock()); Thread_wait t(registry, key, 0, Thread_wait::LOOKUP_OR_CREATE); t.create(); @@ -145,9 +147,9 @@ 
TEST_F(SharedPtrRegistry_all, wait_lookup_or_create) { int value = 3; { shared_ptr ptr(new int); - registry.get_contents()[key] = ptr; + registry.get_contents()[key] = make_pair(ptr, ptr.get()); } - EXPECT_FALSE(registry.get_contents()[key].lock()); + EXPECT_FALSE(registry.get_contents()[key].first.lock()); Thread_wait t(registry, key, value, Thread_wait::LOOKUP_OR_CREATE); t.create(); @@ -188,9 +190,9 @@ TEST_F(SharedPtrRegistry_all, wait_lookup) { int value = 2; { shared_ptr ptr(new int); - registry.get_contents()[key] = ptr; + registry.get_contents()[key] = make_pair(ptr, ptr.get()); } - EXPECT_FALSE(registry.get_contents()[key].lock()); + EXPECT_FALSE(registry.get_contents()[key].first.lock()); Thread_wait t(registry, key, value, Thread_wait::LOOKUP); t.create(); @@ -221,7 +223,7 @@ TEST_F(SharedPtrRegistry_all, get_next) { // entries with expired pointers are silentely ignored const unsigned int key_gone = 222; - registry.get_contents()[key_gone] = shared_ptr(); + registry.get_contents()[key_gone] = make_pair(shared_ptr(), (int*)0); const unsigned int key1 = 111; shared_ptr ptr1 = registry.lookup_or_create(key1); @@ -258,6 +260,39 @@ TEST_F(SharedPtrRegistry_all, get_next) { } } +TEST_F(SharedPtrRegistry_all, remove) { + { + SharedPtrRegistryTest registry; + const unsigned int key1 = 1; + shared_ptr ptr1 = registry.lookup_or_create(key1); + *ptr1 = 400; + registry.remove(key1); + + shared_ptr ptr2 = registry.lookup_or_create(key1); + *ptr2 = 500; + + ptr1 = shared_ptr(); + shared_ptr res = registry.lookup(key1); + assert(res); + assert(res == ptr2); + assert(*res == 500); + } + { + SharedPtrRegistryTest registry; + const unsigned int key1 = 1; + shared_ptr ptr1 = registry.lookup_or_create(key1, 400); + registry.remove(key1); + + shared_ptr ptr2 = registry.lookup_or_create(key1, 500); + + ptr1 = shared_ptr(); + shared_ptr res = registry.lookup(key1); + assert(res); + assert(res == ptr2); + assert(*res == 500); + } +} + class SharedPtrRegistry_destructor : 
public ::testing::Test { public: diff --git a/src/test/libcephfs/readdir_r_cb.cc b/src/test/libcephfs/readdir_r_cb.cc index 788260b3e0ffd..4a99f102da221 100644 --- a/src/test/libcephfs/readdir_r_cb.cc +++ b/src/test/libcephfs/readdir_r_cb.cc @@ -54,4 +54,8 @@ TEST(LibCephFS, ReaddirRCB) { ASSERT_LE(0, ceph_opendir(cmount, c_dir, &dirp)); ASSERT_EQ(5, ceph_getdnames(cmount, dirp, buf, 6)); ASSERT_EQ(4, ceph_getdnames(cmount, dirp, buf, 6)); + + // free cmount after finishing testing + ASSERT_EQ(0, ceph_unmount(cmount)); + ASSERT_EQ(0, ceph_release(cmount)); } diff --git a/src/test/librados/list.cc b/src/test/librados/list.cc index 1ea56c295a486..5adbe07beb9bb 100644 --- a/src/test/librados/list.cc +++ b/src/test/librados/list.cc @@ -1,3 +1,4 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- #include "include/rados/librados.h" #include "include/rados/librados.hpp" #include "test/librados/test.h" @@ -20,9 +21,12 @@ TEST(LibRadosList, ListObjects) { rados_list_ctx_t ctx; ASSERT_EQ(0, rados_objects_list_open(ioctx, &ctx)); const char *entry; - ASSERT_EQ(0, rados_objects_list_next(ctx, &entry, NULL)); - ASSERT_EQ(std::string(entry), "foo"); - ASSERT_EQ(-ENOENT, rados_objects_list_next(ctx, &entry, NULL)); + bool foundit = false; + while (rados_objects_list_next(ctx, &entry, NULL) != -ENOENT) { + foundit = true; + ASSERT_EQ(std::string(entry), "foo"); + } + ASSERT_TRUE(foundit); rados_objects_list_close(ctx); rados_ioctx_destroy(ioctx); ASSERT_EQ(0, destroy_one_pool(pool_name, &cluster)); @@ -40,10 +44,13 @@ TEST(LibRadosList, ListObjectsPP) { bl1.append(buf, sizeof(buf)); ASSERT_EQ((int)sizeof(buf), ioctx.write("foo", bl1, sizeof(buf), 0)); ObjectIterator iter(ioctx.objects_begin()); - ASSERT_EQ((iter == ioctx.objects_end()), false); - ASSERT_EQ((*iter).first, "foo"); - ++iter; - ASSERT_EQ(true, (iter == ioctx.objects_end())); + bool foundit = false; + while (iter != ioctx.objects_end()) { + foundit = true; + ASSERT_EQ((*iter).first, "foo"); + 
++iter; + } + ASSERT_TRUE(foundit); ioctx.close(); ASSERT_EQ(0, destroy_one_pool_pp(pool_name, cluster)); } @@ -51,12 +58,18 @@ TEST(LibRadosList, ListObjectsPP) { static void check_list(std::set& myset, rados_list_ctx_t& ctx) { const char *entry; - while(!myset.empty()) { - ASSERT_EQ(0, rados_objects_list_next(ctx, &entry, NULL)); - ASSERT_TRUE(myset.end() != myset.find(std::string(entry))); + std::set orig_set(myset); + /** + * During splitting, we might see duplicate items. + * We assert that every object returned is in myset and that + * we don't hit ENOENT until we have hit every item in myset + * at least once. + */ + while (rados_objects_list_next(ctx, &entry, NULL) != -ENOENT) { + ASSERT_TRUE(orig_set.end() != orig_set.find(std::string(entry))); myset.erase(std::string(entry)); } - ASSERT_EQ(-ENOENT, rados_objects_list_next(ctx, &entry, NULL)); + ASSERT_TRUE(myset.empty()); } TEST(LibRadosList, ListObjectsNS) { @@ -117,18 +130,19 @@ TEST(LibRadosList, ListObjectsNS) { static void check_listpp(std::set& myset, IoCtx& ioctx) { ObjectIterator iter(ioctx.objects_begin()); - if (myset.empty()) { - ASSERT_EQ((iter == ioctx.objects_end()), true); - return; - } - - while(!myset.empty()) { - ASSERT_EQ((iter == ioctx.objects_end()), false); - ASSERT_TRUE(myset.end() != myset.find(std::string((*iter).first))); + std::set orig_set(myset); + /** + * During splitting, we might see duplicate items. + * We assert that every object returned is in myset and that + * we don't hit ENOENT until we have hit every item in myset + * at least once. 
+ */ + while (iter != ioctx.objects_end()) { + ASSERT_TRUE(orig_set.end() != orig_set.find(std::string((*iter).first))); myset.erase(std::string((*iter).first)); ++iter; } - ASSERT_EQ((iter == ioctx.objects_end()), true); + ASSERT_TRUE(myset.empty()); } TEST(LibRadosList, ListObjectsPPNS) { diff --git a/src/test/old/test_seek_read.c b/src/test/old/test_seek_read.c index aedb32a2ae0b5..1ea3b750b455f 100644 --- a/src/test/old/test_seek_read.c +++ b/src/test/old/test_seek_read.c @@ -38,7 +38,7 @@ int main(int argc, char **argv) utime_t start = ceph_clock_now(g_ceph_context); for (int i=0; i ls; ASSERT_DEATH(index.collection_list_partial(hoid, 0, 0, 0, &ls, &hoid), "0"); } +#endif //GTEST_HAS_DEATH_TEST TEST(FlatIndex, created_unlink) { coll_t collection("ABC"); diff --git a/src/test/osd/ErasureCodeExample.h b/src/test/osd/ErasureCodeExample.h index 0fd55187559f4..07694ea409aae 100644 --- a/src/test/osd/ErasureCodeExample.h +++ b/src/test/osd/ErasureCodeExample.h @@ -76,7 +76,7 @@ class ErasureCodeExample : public ErasureCodeInterface { set available_chunks; for (map::const_iterator i = c2c.begin(); i != c2c.end(); - i++) + ++i) available_chunks.insert(i->first); return minimum_to_decode(want_to_read, available_chunks, minimum); } @@ -109,7 +109,7 @@ class ErasureCodeExample : public ErasureCodeInterface { const bufferptr ptr = out.buffers().front(); for (set::iterator j = want_to_encode.begin(); j != want_to_encode.end(); - j++) { + ++j) { bufferptr chunk(ptr, (*j) * chunk_length, chunk_length); (*encoded)[*j].push_front(chunk); } @@ -125,7 +125,7 @@ class ErasureCodeExample : public ErasureCodeInterface { unsigned chunk_length = (*chunks.begin()).second.length(); for (set::iterator i = want_to_read.begin(); i != want_to_read.end(); - i++) { + ++i) { if (chunks.find(*i) != chunks.end()) { // // If the chunk is available, just copy the bufferptr pointer @@ -146,7 +146,7 @@ class ErasureCodeExample : public ErasureCodeInterface { bufferptr chunk(chunk_length); 
map::const_iterator k = chunks.begin(); const char *a = k->second.buffers().front().c_str(); - k++; + ++k; const char *b = k->second.buffers().front().c_str(); for (unsigned j = 0; j < chunk_length; j++) { chunk[j] = a[j] ^ b[j]; diff --git a/src/test/osd/RadosModel.h b/src/test/osd/RadosModel.h index ac2f336f1101e..80bcf00a6d785 100644 --- a/src/test/osd/RadosModel.h +++ b/src/test/osd/RadosModel.h @@ -18,6 +18,7 @@ #include "Object.h" #include "TestOpStat.h" #include "test/librados/test.h" +#include "common/sharedptr_registry.hpp" #ifndef RADOSMODEL_H #define RADOSMODEL_H @@ -143,7 +144,7 @@ class RadosTestContext { map > pool_obj_cont; set oid_in_use; set oid_not_in_use; - set snaps_in_use; + SharedPtrRegistry snaps_in_use; int current_snap; string pool_name; librados::IoCtx io_ctx; @@ -1321,15 +1322,15 @@ class RollbackOp : public TestOp { bool done; librados::ObjectWriteOperation op; librados::AioCompletion *comp; + std::tr1::shared_ptr in_use; RollbackOp(int n, RadosTestContext *context, const string &_oid, - int snap, TestOpStat *stat = 0) : TestOp(n, context, stat), oid(_oid), - roll_back_to(snap), done(false) + roll_back_to(-1), done(false) {} void _begin() @@ -1340,9 +1341,23 @@ class RollbackOp : public TestOp { context->state_lock.Unlock(); return; } + + if (context->snaps.empty()) { + context->kick(); + context->state_lock.Unlock(); + done = true; + return; + } + context->oid_in_use.insert(oid); context->oid_not_in_use.erase(oid); - context->snaps_in_use.insert(roll_back_to); + + roll_back_to = rand_choose(context->snaps)->first; + in_use = context->snaps_in_use.lookup_or_create( + roll_back_to, + roll_back_to); + + cout << "rollback oid " << oid << " to " << roll_back_to << std::endl; context->roll_back(oid, roll_back_to); uint64_t snap = context->snaps[roll_back_to]; @@ -1371,7 +1386,7 @@ class RollbackOp : public TestOp { context->update_object_version(oid, comp->get_version64()); context->oid_in_use.erase(oid); context->oid_not_in_use.insert(oid); 
- context->snaps_in_use.erase(roll_back_to); + in_use = std::tr1::shared_ptr(); context->kick(); } diff --git a/src/test/osd/TestRados.cc b/src/test/osd/TestRados.cc index 842f9d2bca3e1..20a4f8209cc55 100644 --- a/src/test/osd/TestRados.cc +++ b/src/test/osd/TestRados.cc @@ -116,21 +116,17 @@ class WeightedTestGenerator : public TestOpGenerator } while (true) { int snap = rand_choose(context.snaps)->first; - if (context.snaps_in_use.count(snap)) + if (context.snaps_in_use.lookup(snap)) continue; // in use; try again! cout << "snap_remove snap " << snap << std::endl; return new SnapRemoveOp(m_op, &context, snap, m_stats); } case TEST_OP_ROLLBACK: - if (context.snaps.empty()) { - return NULL; - } { - int snap = rand_choose(context.snaps)->first; string oid = *(rand_choose(context.oid_not_in_use)); - cout << "rollback oid " << oid << " to " << snap << std::endl; - return new RollbackOp(m_op, &context, oid, snap); + cout << "rollback oid " << oid << std::endl; + return new RollbackOp(m_op, &context, oid); } case TEST_OP_SETATTR: diff --git a/src/test/test_c_headers.c b/src/test/test_c_headers.c new file mode 100644 index 0000000000000..0f41966acae22 --- /dev/null +++ b/src/test/test_c_headers.c @@ -0,0 +1,22 @@ +#include "include/cephfs/libcephfs.h" +#include "include/rados/librados.h" + +#ifdef __cplusplus +#error "test invalid: only use C mode" +#endif + +int main(int argc, char **argv) +{ + int ret; + (void)ret; // squash unused warning + + /* librados.h */ + rados_t cluster; + ret = rados_create(&cluster, NULL); + + /* libcephfs.h */ + struct ceph_mount_info *cmount; + ret = ceph_create(&cmount, NULL); + + return 0; +} diff --git a/src/test/test_get_blkdev_size.cc b/src/test/test_get_blkdev_size.cc new file mode 100644 index 0000000000000..ba28f1cd62f10 --- /dev/null +++ b/src/test/test_get_blkdev_size.cc @@ -0,0 +1,35 @@ +#include +#include +#include +#include +#include +#include +#include +#include "common/blkdev.h" + +int main(int argc, char **argv) +{ + int fd, 
ret; + int64_t size; + + if (argc != 2) { + fprintf(stderr, "usage: %s \n", argv[0]); + return -1; + } + + fd = open(argv[1], O_RDONLY); + if (fd < 0) { + perror("open"); + return -1; + } + + ret = get_block_device_size(fd, &size); + if (ret < 0) { + fprintf(stderr, "get_block_device_size: %s\n", strerror(-ret)); + return -1; + } + + fprintf(stdout, "%" PRId64, size); + + return 0; +} diff --git a/src/tools/crushtool.cc b/src/tools/crushtool.cc index 03c83f2415673..caebf2d56207e 100644 --- a/src/tools/crushtool.cc +++ b/src/tools/crushtool.cc @@ -186,7 +186,7 @@ int main(int argc, const char **argv) CrushWrapper crush; - CrushTester tester(crush, cerr, 1); + CrushTester tester(crush, cerr); vector empty_args; // we use -c, don't confuse the generic arg parsing global_init(NULL, empty_args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, diff --git a/src/tools/osdmaptool.cc b/src/tools/osdmaptool.cc index 2e55026076c32..edd31284c4d4f 100644 --- a/src/tools/osdmaptool.cc +++ b/src/tools/osdmaptool.cc @@ -156,7 +156,7 @@ int main(int argc, const char **argv) OSDMap osdmap; bufferlist bl; - cout << me << ": osdmap file '" << fn << "'" << std::endl; + cerr << me << ": osdmap file '" << fn << "'" << std::endl; int r = 0; struct stat st; diff --git a/src/upstart/ceph-osd.conf b/src/upstart/ceph-osd.conf index a2f659460da78..91ac22c5188c2 100644 --- a/src/upstart/ceph-osd.conf +++ b/src/upstart/ceph-osd.conf @@ -19,6 +19,9 @@ pre-start script if [ "${update:-1}" = "1" -o "{$update:-1}" = "true" ]; then # update location in crush hook="$(ceph-conf --cluster=${cluster:-ceph} --name=osd.$id --lookup osd_crush_location_hook || :)" + if [ -z "$hook" ]; then + hook="/usr/bin/ceph-crush-location" + fi location="$($hook --cluster ${cluster:-ceph} --id $id --type osd)" weight="$(ceph-conf --cluster=${cluster:-ceph} --name=osd.$id --lookup osd_crush_initial_weight || :)" defaultweight=`df /var/lib/ceph/osd/${cluster:-ceph}-$id/ | tail -1 | awk '{ d= $2/1073741824 ; r = 
sprintf("%.2f", d); print r }'`